diff --git a/specs/openai.yaml b/specs/openai.yaml
index 62bb66fca8..5f09487b5a 100644
--- a/specs/openai.yaml
+++ b/specs/openai.yaml
@@ -1,11314 +1,24907 @@
-openapi: 3.0.1
+openapi: 3.0.0
 info:
-  title: OpenAI API
-  description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
-  termsOfService: https://openai.com/policies/terms-of-use
-  contact:
-    name: OpenAI Support
-    url: https://help.openai.com/
-  license:
-    name: MIT
-    url: https://github.com/openai/openai-openapi/blob/master/LICENSE
-  version: '2.3.0'
+  title: OpenAI API
+  description: The OpenAI REST API. Please see
+    https://platform.openai.com/docs/api-reference for more details.
+  version: 2.3.0
+  termsOfService: https://openai.com/policies/terms-of-use
+  contact:
+    name: OpenAI Support
+    url: https://help.openai.com/
+  license:
+    name: MIT
+    url: https://github.com/openai/openai-openapi/blob/master/LICENSE
 servers:
-  - url: https://api.openai.com/v1
-paths:
-  /chat/completions:
-    post:
-      tags:
-        - Chat
-      summary: Creates a model response for the given chat conversation.
-      operationId: createChatCompletion
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/CreateChatCompletionRequest'
-        required: true
-      responses:
-        '200':
-          description: OK
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/CreateChatCompletionResponse'
-      x-oaiMeta:
-        name: Create chat completion
-        group: chat
-        returns: "Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed.\n"
-        path: create
-        examples:
-          - title: Default
-            request:
-              curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n"
-              python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n"
-              node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"system\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();"
-            response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n"
-          - title: Image input
-            request:
-              curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"What'\\''s in 
this image?\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n }\n }\n ]\n }\n ],\n \"max_tokens\": 300\n }'\n" - python: "from openai import OpenAI\n\nclient = OpenAI()\n\nresponse = client.chat.completions.create(\n model=\"gpt-4o\",\n messages=[\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What's in this image?\"},\n {\n \"type\": \"image_url\",\n \"image_url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n ],\n }\n ],\n max_tokens=300,\n)\n\nprint(response.choices[0])\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: [\n {\n role: \"user\",\n content: [\n { type: \"text\", text: \"What's in this image?\" },\n {\n type: \"image_url\",\n image_url:\n \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n ],\n },\n ],\n });\n console.log(response.choices[0]);\n}\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream=True\n)\n\nfor chunk in completion:\n print(chunk.choices[0].delta)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n model: \"VAR_model_id\",\n messages: [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream: true,\n });\n\n for await (const chunk of completion) {\n console.log(chunk.choices[0].delta.content);\n }\n}\n\nmain();" - response: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", 
\"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - - title: Functions - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"What'\\''s the weather like in Boston today?\"\n }\n ],\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"tool_choice\": \"auto\"\n}'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\nmessages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}]\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=messages,\n tools=tools,\n tool_choice=\"auto\"\n)\n\nprint(completion)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}];\n const tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n ];\n\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: messages,\n tools: tools,\n tool_choice: \"auto\",\n });\n\n console.log(response);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99\n }\n}\n" - - title: Logprobs - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"logprobs\": true,\n \"top_logprobs\": 2\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n logprobs=True,\n top_logprobs=2\n)\n\nprint(completion.choices[0].message)\nprint(completion.choices[0].logprobs)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"user\", content: \"Hello!\" }],\n model: \"VAR_model_id\",\n logprobs: true,\n top_logprobs: 2,\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1702685778,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\"\n },\n \"logprobs\": {\n \"content\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111],\n \"top_logprobs\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111]\n },\n {\n \"token\": \"Hi\",\n \"logprob\": -1.3190403,\n \"bytes\": [72, 105]\n }\n ]\n },\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [\n 33\n ],\n \"top_logprobs\": [\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [33]\n },\n {\n \"token\": \" there\",\n \"logprob\": -3.787621,\n \"bytes\": [32, 116, 104, 101, 114, 101]\n }\n ]\n },\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119],\n \"top_logprobs\": [\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119]\n },\n {\n \"token\": \"<|end|>\",\n \"logprob\": -10.953937,\n \"bytes\": null\n }\n ]\n },\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110],\n \"top_logprobs\": [\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110]\n },\n {\n \"token\": \" may\",\n \"logprob\": -4.161023,\n \"bytes\": [32, 109, 97, 121]\n }\n ]\n },\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [\n 32,\n 73\n ],\n \"top_logprobs\": [\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [32, 73]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -13.596657,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n }\n ]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116],\n \"top_logprobs\": [\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n },\n {\n \"token\": \" help\",\n \"logprob\": -3.1089056,\n \"bytes\": [32, 104, 101, 108, 112]\n }\n ]\n },\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117],\n \"top_logprobs\": [\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117]\n },\n {\n \"token\": \" today\",\n \"logprob\": -12.807695,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n }\n ]\n },\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121],\n \"top_logprobs\": [\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n },\n {\n \"token\": \"?\",\n \"logprob\": -5.5247097,\n \"bytes\": [63]\n }\n ]\n },\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63],\n \"top_logprobs\": [\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63]\n },\n {\n \"token\": \"?\\n\",\n \"logprob\": -7.184561,\n \"bytes\": [63, 10]\n }\n ]\n }\n ]\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18\n },\n \"system_fingerprint\": null\n}\n" - /completions: - post: - tags: - - Completions - summary: Creates a completion for the provided prompt and parameters. 
- operationId: createCompletion - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateCompletionRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateCompletionResponse' - x-oaiMeta: - name: Create completion - group: completions - returns: "Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed.\n" - legacy: true - examples: - - title: No streaming - request: - curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n max_tokens: 7,\n temperature: 0,\n });\n\n console.log(completion);\n}\nmain();" - response: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"VAR_model_id\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0,\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nfor chunk in client.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0,\n stream=True\n):\n print(chunk.choices[0].text)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n stream: true,\n });\n\n for await (const chunk of stream) {\n console.log(chunk.choices[0].text)\n }\n}\nmain();" - response: "{\n \"id\": \"cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe\",\n \"object\": \"text_completion\",\n \"created\": 1690759702,\n \"choices\": [\n {\n \"text\": \"This\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": null\n }\n ],\n \"model\": \"gpt-3.5-turbo-instruct\"\n \"system_fingerprint\": \"fp_44709d6fcb\",\n}\n" - /images/generations: - post: - tags: - - Images - summary: Creates an image given a prompt. - operationId: createImage - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateImageRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - x-oaiMeta: - name: Create image - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/images/generations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"dall-e-3\",\n \"prompt\": \"A cute baby sea otter\",\n \"n\": 1,\n \"size\": \"1024x1024\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.generate(\n model=\"dall-e-3\",\n prompt=\"A cute baby sea otter\",\n n=1,\n size=\"1024x1024\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.generate({ model: \"dall-e-3\", prompt: \"A cute baby sea otter\" });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /images/edits: - post: - tags: - - Images - summary: Creates an edited or extended image given an original image and a prompt. - operationId: createImageEdit - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateImageEditRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - x-oaiMeta: - name: Create image edit - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/images/edits \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F mask=\"@mask.png\" \\\n -F prompt=\"A cute baby sea otter wearing a beret\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.edit(\n image=open(\"otter.png\", \"rb\"),\n mask=open(\"mask.png\", \"rb\"),\n prompt=\"A cute baby sea otter wearing a beret\",\n n=2,\n size=\"1024x1024\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.edit({\n image: fs.createReadStream(\"otter.png\"),\n mask: fs.createReadStream(\"mask.png\"),\n prompt: \"A cute baby sea otter wearing a beret\",\n });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /images/variations: - post: - tags: - - Images - summary: Creates a variation of a given image. - operationId: createImageVariation - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateImageVariationRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - x-oaiMeta: - name: Create image variation - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/images/variations \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.images.create_variation(\n image=open(\"image_edit_original.png\", \"rb\"),\n n=2,\n size=\"1024x1024\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.createVariation({\n image: fs.createReadStream(\"otter.png\"),\n });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /embeddings: - post: - tags: - - Embeddings - summary: Creates an embedding vector representing the input text. - operationId: createEmbedding - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEmbeddingRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEmbeddingResponse' - x-oaiMeta: - name: Create embeddings - group: embeddings - returns: 'A list of [embedding](/docs/api-reference/embeddings/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/embeddings \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input\": \"The food was delicious and the waiter...\",\n \"model\": \"text-embedding-ada-002\",\n \"encoding_format\": \"float\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.embeddings.create(\n model=\"text-embedding-ada-002\",\n input=\"The food was delicious and the waiter...\",\n encoding_format=\"float\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const embedding = await openai.embeddings.create({\n model: \"text-embedding-ada-002\",\n input: \"The quick brown fox jumped over the lazy dog\",\n encoding_format: \"float\",\n });\n\n console.log(embedding);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": 8,\n \"total_tokens\": 8\n }\n}\n" - /audio/speech: - post: - tags: - - Audio - summary: Generates audio from the input text. - operationId: createSpeech - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateSpeechRequest' - required: true - responses: - '200': - description: OK - headers: - Transfer-Encoding: - description: chunked - schema: - type: string - content: - application/octet-stream: - schema: - type: string - format: binary - x-oaiMeta: - name: Create speech - group: audio - returns: The audio file content. 
- examples: - request: - curl: "curl https://api.openai.com/v1/audio/speech \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"model\": \"tts-1\",\n \"input\": \"The quick brown fox jumped over the lazy dog.\",\n \"voice\": \"alloy\"\n }' \\\n --output speech.mp3\n" - python: "from pathlib import Path\nimport openai\n\nspeech_file_path = Path(__file__).parent / \"speech.mp3\"\nresponse = openai.audio.speech.create(\n model=\"tts-1\",\n voice=\"alloy\",\n input=\"The quick brown fox jumped over the lazy dog.\"\n)\nresponse.stream_to_file(speech_file_path)\n" - node: "import fs from \"fs\";\nimport path from \"path\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst speechFile = path.resolve(\"./speech.mp3\");\n\nasync function main() {\n const mp3 = await openai.audio.speech.create({\n model: \"tts-1\",\n voice: \"alloy\",\n input: \"Today is a wonderful day to build something people love!\",\n });\n console.log(speechFile);\n const buffer = Buffer.from(await mp3.arrayBuffer());\n await fs.promises.writeFile(speechFile, buffer);\n}\nmain();\n" - /audio/transcriptions: - post: - tags: - - Audio - summary: Transcribes audio into the input language. - operationId: createTranscription - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateTranscriptionRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/CreateTranscriptionResponseJson' - - $ref: '#/components/schemas/CreateTranscriptionResponseVerboseJson' - x-oaiMeta: - name: Create transcription - group: audio - returns: 'The [transcription object](/docs/api-reference/audio/json-object) or a [verbose transcription object](/docs/api-reference/audio/verbose-json-object).' - examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/audio/transcriptions \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: multipart/form-data\" \\\n -F file=\"@/path/to/file/audio.mp3\" \\\n -F model=\"whisper-1\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.transcriptions.create(\n model=\"whisper-1\",\n file=audio_file\n)\n" - node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const transcription = await openai.audio.transcriptions.create({\n file: fs.createReadStream(\"audio.mp3\"),\n model: \"whisper-1\",\n });\n\n console.log(transcription.text);\n}\nmain();\n" - response: "{\n \"text\": \"Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. 
This is a place where you can get to do that.\"\n}\n" - - title: Word timestamps - request: - curl: "curl https://api.openai.com/v1/audio/transcriptions \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: multipart/form-data\" \\\n -F file=\"@/path/to/file/audio.mp3\" \\\n -F \"timestamp_granularities[]=word\" \\\n -F model=\"whisper-1\" \\\n -F response_format=\"verbose_json\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.transcriptions.create(\n file=audio_file,\n model=\"whisper-1\",\n response_format=\"verbose_json\",\n timestamp_granularities=[\"word\"]\n)\n\nprint(transcript.words)\n" - node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const transcription = await openai.audio.transcriptions.create({\n file: fs.createReadStream(\"audio.mp3\"),\n model: \"whisper-1\",\n response_format: \"verbose_json\",\n timestamp_granularities: [\"word\"]\n });\n\n console.log(transcription.text);\n}\nmain();\n" - response: "{\n \"task\": \"transcribe\",\n \"language\": \"english\",\n \"duration\": 8.470000267028809,\n \"text\": \"The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.\",\n \"words\": [\n {\n \"word\": \"The\",\n \"start\": 0.0,\n \"end\": 0.23999999463558197\n },\n ...\n {\n \"word\": \"volleyball\",\n \"start\": 7.400000095367432,\n \"end\": 7.900000095367432\n }\n ]\n}\n" - - title: Segment timestamps - request: - curl: "curl https://api.openai.com/v1/audio/transcriptions \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: multipart/form-data\" \\\n -F file=\"@/path/to/file/audio.mp3\" \\\n -F \"timestamp_granularities[]=segment\" \\\n -F model=\"whisper-1\" \\\n -F response_format=\"verbose_json\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.transcriptions.create(\n file=audio_file,\n model=\"whisper-1\",\n response_format=\"verbose_json\",\n timestamp_granularities=[\"segment\"]\n)\n\nprint(transcript.words)\n" - node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const transcription = await openai.audio.transcriptions.create({\n file: fs.createReadStream(\"audio.mp3\"),\n model: \"whisper-1\",\n response_format: \"verbose_json\",\n timestamp_granularities: [\"segment\"]\n });\n\n console.log(transcription.text);\n}\nmain();\n" - response: "{\n \"task\": \"transcribe\",\n \"language\": \"english\",\n \"duration\": 8.470000267028809,\n \"text\": \"The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.\",\n \"segments\": [\n {\n \"id\": 0,\n \"seek\": 0,\n \"start\": 0.0,\n \"end\": 3.319999933242798,\n \"text\": \" The beach was a popular spot on a hot summer day.\",\n \"tokens\": [\n 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530\n ],\n \"temperature\": 0.0,\n \"avg_logprob\": -0.2860786020755768,\n \"compression_ratio\": 1.2363636493682861,\n \"no_speech_prob\": 0.00985979475080967\n },\n ...\n ]\n}\n" - /audio/translations: - post: - tags: - - Audio - summary: Translates audio into English. 
- operationId: createTranslation - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateTranslationRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/CreateTranslationResponseJson' - - $ref: '#/components/schemas/CreateTranslationResponseVerboseJson' - x-oaiMeta: - name: Create translation - group: audio - returns: The translated text. - examples: - request: - curl: "curl https://api.openai.com/v1/audio/translations \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: multipart/form-data\" \\\n -F file=\"@/path/to/file/german.m4a\" \\\n -F model=\"whisper-1\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.translations.create(\n model=\"whisper-1\",\n file=audio_file\n)\n" - node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const translation = await openai.audio.translations.create({\n file: fs.createReadStream(\"speech.mp3\"),\n model: \"whisper-1\",\n });\n\n console.log(translation.text);\n}\nmain();\n" - response: "{\n \"text\": \"Hello, my name is Wolfgang and I come from Germany. Where are you heading today?\"\n}\n" - /files: - get: - tags: - - Files - summary: Returns a list of files that belong to the user's organization. - operationId: listFiles - parameters: - - name: purpose - in: query - description: Only return files with the given purpose. - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFilesResponse' - x-oaiMeta: - name: List files - group: files - returns: 'A list of [File](/docs/api-reference/files/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.list()\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.files.list();\n\n for await (const file of list) {\n console.log(file);\n }\n}\n\nmain();" - response: "{\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 175,\n \"created_at\": 1613677385,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n },\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779121,\n \"filename\": \"puppy.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n ],\n \"object\": \"list\"\n}\n" - post: - tags: - - Files - summary: "Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.\n\nThe Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.\n\nThe Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.\n\nThe Batch API only supports `.jsonl` files up to 100 MB in size. 
The input also has a specific required [format](/docs/api-reference/batch/request-input).\n\nPlease [contact us](https://help.openai.com/) if you need to increase these storage limits.\n" - operationId: createFile - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateFileRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIFile' - x-oaiMeta: - name: Upload file - group: files - returns: 'The uploaded [File](/docs/api-reference/files/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F purpose=\"fine-tune\" \\\n -F file=\"@mydata.jsonl\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.create(\n file=open(\"mydata.jsonl\", \"rb\"),\n purpose=\"fine-tune\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.create({\n file: fs.createReadStream(\"mydata.jsonl\"),\n purpose: \"fine-tune\",\n });\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" - '/files/{file_id}': - delete: - tags: - - Files - summary: Delete a file. - operationId: deleteFile - parameters: - - name: file_id - in: path - description: The ID of the file to use for this request. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteFileResponse' - x-oaiMeta: - name: Delete file - group: files - returns: Deletion status. - examples: - request: - curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -X DELETE \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.delete(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.del(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"deleted\": true\n}\n" - get: - tags: - - Files - summary: Returns information about a specific file. - operationId: retrieveFile - parameters: - - name: file_id - in: path - description: The ID of the file to use for this request. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIFile' - x-oaiMeta: - name: Retrieve file - group: files - returns: 'The [File](/docs/api-reference/files/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.retrieve(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.retrieve(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" - '/files/{file_id}/content': - get: - tags: - - Files - summary: Returns the contents of the specified file. - operationId: downloadFile - parameters: - - name: file_id - in: path - description: The ID of the file to use for this request. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - type: string - x-oaiMeta: - name: Retrieve file content - group: files - returns: The file content. - examples: - request: - curl: "curl https://api.openai.com/v1/files/file-abc123/content \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" > file.jsonl\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncontent = client.files.content(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.content(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();\n" - /uploads: - post: - tags: - - Uploads - summary: "Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.\n\nOnce you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.\n\nFor certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:\n- [Assistants](/docs/assistants/tools/file-search/supported-files)\n\nFor guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).\n" - operationId: createUpload - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateUploadRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Upload' - x-oaiMeta: - name: Create upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `pending`.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/uploads \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"purpose\": \"fine-tune\",\n \"filename\": \"training_examples.jsonl\",\n \"bytes\": 2147483648,\n \"mime_type\": \"text/jsonl\"\n }'\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"pending\",\n \"expires_at\": 1719127296\n}\n" - '/uploads/{upload_id}/parts': - post: - tags: - - Uploads - summary: "Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. \n\nEach Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB.\n\nIt is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete).\n" - operationId: addUploadPart - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/AddUploadPartRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/UploadPart' - x-oaiMeta: - name: Add upload part - group: uploads - returns: 'The upload [Part](/docs/api-reference/uploads/part-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/parts\n -F data=\"aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz...\"\n" - response: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719185911,\n \"upload_id\": \"upload_abc123\"\n}\n" - '/uploads/{upload_id}/complete': - post: - tags: - - Uploads - summary: "Completes the [Upload](/docs/api-reference/uploads/object). \n\nWithin the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform.\n\nYou can specify the order of the Parts by passing in an ordered list of the Part IDs.\n\nThe number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed.\n" - operationId: completeUpload - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CompleteUploadRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Upload' - x-oaiMeta: - name: Complete upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/complete\n -d '{\n \"part_ids\": [\"part_def456\", \"part_ghi789\"]\n }'\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" - '/uploads/{upload_id}/cancel': - post: - tags: - - Uploads - summary: "Cancels the Upload. No Parts may be added after an Upload is cancelled.\n" - operationId: cancelUpload - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Upload' - x-oaiMeta: - name: Cancel upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`.' - examples: - request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/cancel\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"cancelled\",\n \"expires_at\": 1719127296\n}\n" - /fine_tuning/jobs: - post: - tags: - - Fine-tuning - summary: "Creates a fine-tuning job which begins the process of creating a new model from a given dataset.\n\nResponse includes details of the enqueued job including job status and the name of the fine-tuned models once complete.\n\n[Learn more about fine-tuning](/docs/guides/fine-tuning)\n" - operationId: createFineTuningJob - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateFineTuningJobRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - x-oaiMeta: - name: Create fine-tuning job - group: fine-tuning - returns: 'A [fine-tuning.job](/docs/api-reference/fine-tuning/object) object.' 
- examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-BK7bzQj3FfZFXr7DbL6xJwfo\",\n \"model\": \"gpt-4o-mini\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n model=\"gpt-4o-mini\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\"\n });\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n}\n" - - title: Epochs - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\",\n \"hyperparameters\": {\n \"n_epochs\": 2\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n model=\"gpt-4o-mini\",\n hyperparameters={\n \"n_epochs\":2\n }\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\",\n model: \"gpt-4o-mini\",\n hyperparameters: { n_epochs: 2 }\n });\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\"n_epochs\": 2},\n}\n" - - title: Validation file - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"validation_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n validation_file=\"file-def456\",\n model=\"gpt-4o-mini\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\",\n validation_file: \"file-abc123\"\n });\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n}\n" - - title: W&B Integration - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n 
\"training_file\": \"file-abc123\",\n \"validation_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\",\n \"integrations\": [\n {\n \"type\": \"wandb\",\n \"wandb\": {\n \"project\": \"my-wandb-project\",\n \"name\": \"ft-run-display-name\"\n \"tags\": [\n \"first-experiment\", \"v2\"\n ]\n }\n }\n ]\n }'\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n \"integrations\": [\n {\n \"type\": \"wandb\",\n \"wandb\": {\n \"project\": \"my-wandb-project\",\n \"entity\": None,\n \"run_id\": \"ftjob-abc123\"\n }\n }\n ]\n}\n" - get: - tags: - - Fine-tuning - summary: "List your organization's fine-tuning jobs\n" - operationId: listPaginatedFineTuningJobs - parameters: - - name: after - in: query - description: Identifier for the last job from the previous pagination request. - schema: - type: string - - name: limit - in: query - description: Number of fine-tuning jobs to retrieve. - schema: - type: integer - default: 20 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListPaginatedFineTuningJobsResponse' - x-oaiMeta: - name: List fine-tuning jobs - group: fine-tuning - returns: 'A list of paginated [fine-tuning job](/docs/api-reference/fine-tuning/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.list()\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.fineTuning.jobs.list();\n\n for await (const fineTune of list) {\n console.log(fineTune);\n }\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-TjX0lMfOniCZX64t9PUQT5hn\",\n \"created_at\": 1689813489,\n \"level\": \"warn\",\n \"message\": \"Fine tuning process stopping due to job cancellation\",\n \"data\": null,\n \"type\": \"message\"\n },\n { ... },\n { ... }\n ], \"has_more\": true\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}': - get: - tags: - - Fine-tuning - summary: "Get info about a fine-tuning job.\n\n[Learn more about fine-tuning](/docs/guides/fine-tuning)\n" - operationId: retrieveFineTuningJob - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - x-oaiMeta: - name: Retrieve fine-tuning job - group: fine-tuning - returns: 'The [fine-tuning](/docs/api-reference/fine-tuning/object) object with the given ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.retrieve(\"ftjob-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.retrieve(\"ftjob-abc123\");\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/events': - get: - tags: - - Fine-tuning - summary: "Get status updates for a fine-tuning job.\n" - operationId: listFineTuningEvents - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job to get events for.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - - name: after - in: query - description: Identifier for the last event from the previous pagination request. - schema: - type: string - - name: limit - in: query - description: Number of events to retrieve. - schema: - type: integer - default: 20 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuningJobEventsResponse' - x-oaiMeta: - name: List fine-tuning events - group: fine-tuning - returns: A list of fine-tuning event objects. 
- examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.list_events(\n fine_tuning_job_id=\"ftjob-abc123\",\n limit=2\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.fineTuning.list_events(id=\"ftjob-abc123\", limit=2);\n\n for await (const fineTune of list) {\n console.log(fineTune);\n }\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-ddTJfwuMVpfLXseO0Am0Gqjm\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"Fine tuning job successfully completed\",\n \"data\": null,\n \"type\": \"message\"\n },\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-tyiGuB72evQncpH87xe505Sv\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel\",\n \"data\": null,\n \"type\": \"message\"\n }\n ],\n \"has_more\": true\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/cancel': - post: - tags: - - Fine-tuning - summary: "Immediately cancel a fine-tune job.\n" - operationId: cancelFineTuningJob - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job to cancel.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - x-oaiMeta: - name: Cancel fine-tuning - group: fine-tuning - returns: 'The cancelled [fine-tuning](/docs/api-reference/fine-tuning/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.cancel(\"ftjob-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.cancel(\"ftjob-abc123\");\n\n console.log(fineTune);\n}\nmain();" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"hyperparameters\": {\n \"n_epochs\": \"auto\"\n },\n \"status\": \"cancelled\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\"\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints': - get: - tags: - - Fine-tuning - summary: "List checkpoints for a fine-tuning job.\n" - operationId: listFineTuningJobCheckpoints - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job to get checkpoints for.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - - name: after - in: query - description: Identifier for the last checkpoint ID from the previous pagination request. - schema: - type: string - - name: limit - in: query - description: Number of checkpoints to retrieve. 
- schema: - type: integer - default: 10 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuningJobCheckpointsResponse' - x-oaiMeta: - name: List fine-tuning checkpoints - group: fine-tuning - returns: 'A list of fine-tuning [checkpoint objects](/docs/api-reference/fine-tuning/checkpoint-object) for a fine-tuning job.' - examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/checkpoints \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"created_at\": 1721764867,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000\",\n \"metrics\": {\n \"full_valid_loss\": 0.134,\n \"full_valid_mean_token_accuracy\": 0.874\n },\n \"fine_tuning_job_id\": \"ftjob-abc123\",\n \"step_number\": 2000\n },\n {\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_enQCFmOTGj3syEpYVhBRLTSy\",\n \"created_at\": 1721764800,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000\",\n \"metrics\": {\n \"full_valid_loss\": 0.167,\n \"full_valid_mean_token_accuracy\": 0.781\n },\n \"fine_tuning_job_id\": \"ftjob-abc123\",\n \"step_number\": 1000\n }\n ],\n \"first_id\": \"ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"last_id\": \"ftckpt_enQCFmOTGj3syEpYVhBRLTSy\",\n \"has_more\": true\n}\n" - /models: - get: - tags: - Models - summary: 'Lists the currently available models, and provides basic information about each one such as the owner and availability.' - operationId: listModels - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListModelsResponse' - x-oaiMeta: - name: List models - group: models - returns: 'A list of [model](/docs/api-reference/models/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/models \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.list()\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.models.list();\n\n for await (const model of list) {\n console.log(model);\n }\n}\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"model-id-0\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"organization-owner\"\n },\n {\n \"id\": \"model-id-1\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"organization-owner\"\n },\n {\n \"id\": \"model-id-2\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n }\n ]\n}\n" - '/models/{model}': - get: - tags: - Models - summary: 'Retrieves a model instance, providing basic information about the model such as the owner and permissioning.' - operationId: retrieveModel - parameters: - name: model - in: path - description: The ID of the model to use for this request - required: true - schema: - type: string - example: gpt-4o-mini - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Model' - x-oaiMeta: - name: Retrieve model - group: models - returns: 'The [model](/docs/api-reference/models/object) object matching the specified ID.'
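The checkpoint-listing example above ships only a curl request; a minimal Python sketch, assuming the openai-python SDK mirrors this endpoint as `client.fine_tuning.jobs.checkpoints.list` (field names follow the example response):

from openai import OpenAI

client = OpenAI()

# List checkpoints for a job and print the metrics shown in the example response.
checkpoints = client.fine_tuning.jobs.checkpoints.list("ftjob-abc123", limit=10)
for checkpoint in checkpoints.data:
    print(
        checkpoint.step_number,
        checkpoint.fine_tuned_model_checkpoint,
        checkpoint.metrics.full_valid_loss,
    )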
- examples: - request: - curl: "curl https://api.openai.com/v1/models/VAR_model_id \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.retrieve(\"VAR_model_id\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const model = await openai.models.retrieve(\"VAR_model_id\");\n\n console.log(model);\n}\n\nmain();" - response: "{\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" - delete: - tags: - - Models - summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. - operationId: deleteModel - parameters: - - name: model - in: path - description: The model to delete - required: true - schema: - type: string - example: ft:gpt-4o-mini:acemeco:suffix:abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteModelResponse' - x-oaiMeta: - name: Delete a fine-tuned model - group: models - returns: Deletion status. - examples: - request: - curl: "curl https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 \\\n -X DELETE \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.delete(\"ft:gpt-4o-mini:acemeco:suffix:abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const model = await openai.models.del(\"ft:gpt-4o-mini:acemeco:suffix:abc123\");\n\n console.log(model);\n}\nmain();" - response: "{\n \"id\": \"ft:gpt-4o-mini:acemeco:suffix:abc123\",\n \"object\": \"model\",\n \"deleted\": true\n}\n" - /moderations: - post: - tags: - - Moderations - summary: Classifies if text is potentially harmful. - operationId: createModeration - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateModerationRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateModerationResponse' - x-oaiMeta: - name: Create moderation - group: moderations - returns: 'A [moderation](/docs/api-reference/moderations/object) object.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/moderations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"input\": \"I want to kill them.\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmoderation = client.moderations.create(input=\"I want to kill them.\")\nprint(moderation)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const moderation = await openai.moderations.create({ input: \"I want to kill them.\" });\n\n console.log(moderation);\n}\nmain();\n" - response: "{\n \"id\": \"modr-XXXXX\",\n \"model\": \"text-moderation-005\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"sexual\": false,\n \"hate\": false,\n \"harassment\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"hate/threatening\": false,\n \"violence/graphic\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"harassment/threatening\": true,\n \"violence\": true,\n },\n \"category_scores\": {\n \"sexual\": 1.2282071e-06,\n \"hate\": 0.010696256,\n \"harassment\": 0.29842457,\n \"self-harm\": 1.5236925e-08,\n \"sexual/minors\": 5.7246268e-08,\n \"hate/threatening\": 0.0060676364,\n \"violence/graphic\": 4.435014e-06,\n \"self-harm/intent\": 8.098441e-10,\n \"self-harm/instructions\": 2.8498655e-11,\n \"harassment/threatening\": 0.63055265,\n \"violence\": 0.99011886,\n }\n }\n ]\n}\n" - /assistants: - get: - tags: - - Assistants - summary: Returns a list of assistants. - operationId: listAssistants - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListAssistantsResponse' - x-oaiMeta: - name: List assistants - group: assistants - beta: true - returns: 'A list of [assistant](/docs/api-reference/assistants/object) objects.' 
- examples: - request: - curl: "curl \"https://api.openai.com/v1/assistants?order=desc&limit=20\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistants = client.beta.assistants.list(\n order=\"desc\",\n limit=\"20\",\n)\nprint(my_assistants.data)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistants = await openai.beta.assistants.list({\n order: \"desc\",\n limit: \"20\",\n });\n\n console.log(myAssistants.data);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" - post: - tags: - - Assistants - summary: Create an assistant with a model and instructions. - operationId: createAssistant - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAssistantRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - x-oaiMeta: - name: Create assistant - group: assistants - beta: true - returns: 'An [assistant](/docs/api-reference/assistants/object) object.' - examples: - - title: Code Interpreter - request: - curl: "curl \"https://api.openai.com/v1/assistants\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"name\": \"Math Tutor\",\n \"tools\": [{\"type\": \"code_interpreter\"}],\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n name=\"Math Tutor\",\n tools=[{\"type\": \"code_interpreter\"}],\n model=\"gpt-4o\",\n)\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.\",\n name: \"Math Tutor\",\n tools: [{ type: \"code_interpreter\" }],\n model: \"gpt-4o\",\n });\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - - title: Files - request: - curl: "curl https://api.openai.com/v1/assistants \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"tool_resources\": {\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n tool_resources={\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n model=\"gpt-4o\"\n)\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n tool_resources: {\n file_search: {\n vector_store_ids: [\"vs_123\"]\n }\n },\n model: \"gpt-4o\"\n });\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009403,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - '/assistants/{assistant_id}': - get: - tags: - - Assistants - summary: Retrieves an assistant. - operationId: getAssistant - parameters: - - name: assistant_id - in: path - description: The ID of the assistant to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - x-oaiMeta: - name: Retrieve assistant - group: assistants - beta: true - returns: 'The [assistant](/docs/api-reference/assistants/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.retrieve(\"asst_abc123\")\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.retrieve(\n \"asst_abc123\"\n );\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - post: - tags: - - Assistants - summary: Modifies an assistant. - operationId: modifyAssistant - parameters: - - name: assistant_id - in: path - description: The ID of the assistant to modify. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyAssistantRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - x-oaiMeta: - name: Modify assistant - group: assistants - beta: true - returns: 'The modified [assistant](/docs/api-reference/assistants/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_assistant = client.beta.assistants.update(\n \"asst_abc123\",\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n model=\"gpt-4o\"\n)\n\nprint(my_updated_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myUpdatedAssistant = await openai.beta.assistants.update(\n \"asst_abc123\",\n {\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n model: \"gpt-4o\"\n }\n );\n\n console.log(myUpdatedAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": []\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - delete: - tags: - - Assistants - summary: Delete an assistant. - operationId: deleteAssistant - parameters: - - name: assistant_id - in: path - description: The ID of the assistant to delete. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteAssistantResponse' - x-oaiMeta: - name: Delete assistant - group: assistants - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.assistants.delete(\"asst_abc123\")\nprint(response)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.assistants.del(\"asst_abc123\");\n\n console.log(response);\n}\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant.deleted\",\n \"deleted\": true\n}\n" - /threads: - post: - tags: - - Assistants - summary: Create a thread. - operationId: createThread - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateThreadRequest' - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - x-oaiMeta: - name: Create thread - group: threads - beta: true - returns: 'A [thread](/docs/api-reference/threads) object.' - examples: - - title: Empty - request: - curl: "curl https://api.openai.com/v1/threads \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d ''\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nempty_thread = client.beta.threads.create()\nprint(empty_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const emptyThread = await openai.beta.threads.create();\n\n console.log(emptyThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699012949,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" - - title: Messages - request: - curl: "curl https://api.openai.com/v1/threads \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-H \"OpenAI-Beta: assistants=v2\" \\\n-d '{\n \"messages\": [{\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n }, {\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage_thread = client.beta.threads.create(\n messages=[\n {\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n },\n {\n \"role\": \"user\",\n \"content\": \"How does AI work? 
Explain it in simple terms.\"\n },\n ]\n)\n\nprint(message_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messageThread = await openai.beta.threads.create({\n messages: [\n {\n role: \"user\",\n content: \"Hello, what is AI?\"\n },\n {\n role: \"user\",\n content: \"How does AI work? Explain it in simple terms.\",\n },\n ],\n });\n\n console.log(messageThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" - '/threads/{thread_id}': - get: - tags: - - Assistants - summary: Retrieves a thread. - operationId: getThread - parameters: - - name: thread_id - in: path - description: The ID of the thread to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - x-oaiMeta: - name: Retrieve thread - group: threads - beta: true - returns: 'The [thread](/docs/api-reference/threads/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_thread = client.beta.threads.retrieve(\"thread_abc123\")\nprint(my_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myThread = await openai.beta.threads.retrieve(\n \"thread_abc123\"\n );\n\n console.log(myThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": []\n }\n }\n}\n" - post: - tags: - - Assistants - summary: Modifies a thread. - operationId: modifyThread - parameters: - - name: thread_id - in: path - description: The ID of the thread to modify. Only the `metadata` can be modified. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyThreadRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - x-oaiMeta: - name: Modify thread - group: threads - beta: true - returns: 'The modified [thread](/docs/api-reference/threads/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_thread = client.beta.threads.update(\n \"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n)\nprint(my_updated_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const updatedThread = await openai.beta.threads.update(\n \"thread_abc123\",\n {\n metadata: { modified: \"true\", user: \"abc123\" },\n }\n );\n\n console.log(updatedThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n },\n \"tool_resources\": {}\n}\n" - delete: - tags: - - Assistants - summary: Delete a thread. - operationId: deleteThread - parameters: - - name: thread_id - in: path - description: The ID of the thread to delete. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteThreadResponse' - x-oaiMeta: - name: Delete thread - group: threads - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.threads.delete(\"thread_abc123\")\nprint(response)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.threads.del(\"thread_abc123\");\n\n console.log(response);\n}\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread.deleted\",\n \"deleted\": true\n}\n" - '/threads/{thread_id}/messages': - get: - tags: - - Assistants - summary: Returns a list of messages for a given thread. - operationId: listMessages - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) the messages belong to.' - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: run_id - in: query - description: "Filter messages by the run ID that generated them.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListMessagesResponse' - x-oaiMeta: - name: List messages - group: threads - beta: true - returns: 'A list of [message](/docs/api-reference/messages) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_messages = client.beta.threads.messages.list(\"thread_abc123\")\nprint(thread_messages.data)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.list(\n \"thread_abc123\"\n );\n\n console.log(threadMessages.data);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n },\n {\n \"id\": \"msg_abc456\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hello, what is AI?\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n }\n ],\n \"first_id\": \"msg_abc123\",\n \"last_id\": \"msg_abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Assistants - summary: Create a message. - operationId: createMessage - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to create a message for.' - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateMessageRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - x-oaiMeta: - name: Create message - group: threads - beta: true - returns: 'A [message](/docs/api-reference/messages/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_message = client.beta.threads.messages.create(\n \"thread_abc123\",\n role=\"user\",\n content=\"How does AI work? 
Explain it in simple terms.\",\n)\nprint(thread_message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.create(\n \"thread_abc123\",\n { role: \"user\", content: \"How does AI work? Explain it in simple terms.\" }\n );\n\n console.log(threadMessages);\n}\n\nmain();" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1713226573,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" - '/threads/{thread_id}/messages/{message_id}': - get: - tags: - - Assistants - summary: Retrieve a message. - operationId: getMessage - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to which this message belongs.' - required: true - schema: - type: string - - name: message_id - in: path - description: The ID of the message to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - x-oaiMeta: - name: Retrieve message - group: threads - beta: true - returns: 'The [message](/docs/api-reference/messages/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.retrieve(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n)\nprint(message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.retrieve(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(message);\n}\n\nmain();" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" - post: - tags: - - Assistants - summary: Modifies a message. - operationId: modifyMessage - parameters: - - name: thread_id - in: path - description: The ID of the thread to which this message belongs. - required: true - schema: - type: string - - name: message_id - in: path - description: The ID of the message to modify. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyMessageRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - x-oaiMeta: - name: Modify message - group: threads - beta: true - returns: 'The modified [message](/docs/api-reference/messages/object) object.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.update(\n message_id=\"msg_abc12\",\n thread_id=\"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\",\n },\n)\nprint(message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.update(\n \"thread_abc123\",\n \"msg_abc123\",\n {\n metadata: {\n modified: \"true\",\n user: \"abc123\",\n },\n }\n }'" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"file_ids\": [],\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n}\n" - delete: - tags: - - Assistants - summary: Deletes a message. - operationId: deleteMessage - parameters: - - name: thread_id - in: path - description: The ID of the thread to which this message belongs. - required: true - schema: - type: string - - name: message_id - in: path - description: The ID of the message to delete. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteMessageResponse' - x-oaiMeta: - name: Delete message - group: threads - beta: true - returns: Deletion status - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_message = client.beta.threads.messages.delete(\n message_id=\"msg_abc12\",\n thread_id=\"thread_abc123\",\n)\nprint(deleted_message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedMessage = await openai.beta.threads.messages.del(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(deletedMessage);\n}" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message.deleted\",\n \"deleted\": true\n}\n" - /threads/runs: - post: - tags: - - Assistants - summary: Create a thread and run it in one request. - operationId: createThreadAndRun - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateThreadAndRunRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Create thread and run - group: threads - beta: true - returns: 'A [run](/docs/api-reference/runs/object) object.' 
- examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.create_and_run(\n assistant_id=\"asst_abc123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_abc123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Explain deep learning to a 5 year old.\" },\n ],\n },\n });\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076792,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": null,\n \"expires_at\": 1699077392,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"required_action\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant.\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_completion_tokens\": null,\n \"max_prompt_tokens\": null,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"incomplete_details\": null,\n \"usage\": null,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.create_and_run(\n assistant_id=\"asst_123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Hello\" },\n ],\n },\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710348075,\"metadata\":{}}\n\nevent: thread.run.created\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! How can I assist you today?\",\"annotations\":[]}}], \"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\n{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1713226836,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1713226837,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: done\ndata: [DONE]\n" - - title: Streaming with Functions - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.create_and_run(\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"What is the weather like in San Francisco?\" },\n ],\n },\n tools: tools,\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710351818,\"metadata\":{}}\n\nevent: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"\",\"output\":null}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"{\\\"\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"location\"}}]}}}\n\n...\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"ahrenheit\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"\\\"}\"}}]}}}\n\nevent: thread.run.requires_action\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"requires_action\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":{\"type\":\"submit_tool_outputs\",\"submit_tool_outputs\":{\"tool_calls\":[{\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\"}}]}},\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given 
location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs': - get: - tags: - - Assistants - summary: Returns a list of runs belonging to a thread. - operationId: listRuns - parameters: - - name: thread_id - in: path - description: The ID of the thread the run belongs to. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListRunsResponse' - x-oaiMeta: - name: List runs - group: threads - beta: true - returns: 'A list of [run](/docs/api-reference/runs/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nruns = client.beta.threads.runs.list(\n \"thread_abc123\"\n)\n\nprint(runs)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const runs = await openai.beta.threads.runs.list(\n \"thread_abc123\"\n );\n\n console.log(runs);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n },\n {\n \"id\": \"run_abc456\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n }\n ],\n \"first_id\": \"run_abc123\",\n \"last_id\": \"run_abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Assistants - summary: Create a run. - operationId: createRun - parameters: - - name: thread_id - in: path - description: The ID of the thread to run. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateRunRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Create run - group: threads - beta: true - returns: 'A [run](/docs/api-reference/runs/object) object.' 
- examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n { assistant_id: \"asst_abc123\" }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_123\",\n assistant_id=\"asst_123\",\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_123\",\n { assistant_id: \"asst_123\", stream: true }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710330641,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710330642,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710330642,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710330641,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710330642,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - - title: Streaming with Functions - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n {\n assistant_id: \"asst_abc123\",\n tools: tools,\n stream: true\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710348075,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: 
{\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710348075,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710348077,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs/{run_id}': - get: - tags: - - Assistants - summary: Retrieves a run. - operationId: getRun - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Retrieve run - group: threads - beta: true - returns: 'The [run](/docs/api-reference/runs/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.retrieve(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - post: - tags: - - Assistants - summary: Modifies a run. - operationId: modifyRun - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to modify. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyRunRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Modify run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.update(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n metadata={\"user_id\": \"user_abc123\"},\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.update(\n \"thread_abc123\",\n \"run_abc123\",\n {\n metadata: {\n user_id: \"user_abc123\",\n },\n }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - '/threads/{thread_id}/runs/{run_id}/submit_tool_outputs': - post: - tags: - - Assistants - summary: "When a run has the `status: \"requires_action\"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request.\n" - operationId: submitToolOuputsToRun - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to which this run belongs.' - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run that requires the tool output submission. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SubmitToolOutputsRunRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Submit tool outputs to run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
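# --- Editorial sketch (not part of the spec): end-to-end tool-output flow.
# The summary above describes submitting tool outputs once a run reaches
# `requires_action`. A minimal, hedged illustration of that round trip, using only
# the Python client calls shown elsewhere in this file (runs.create, runs.retrieve,
# runs.submit_tool_outputs) and assuming the run object exposes the JSON fields
# documented here (status, id, required_action.submit_tool_outputs.tool_calls)
# as attributes:
#
#   import time
#   from openai import OpenAI
#
#   client = OpenAI()
#
#   run = client.beta.threads.runs.create(thread_id="thread_123", assistant_id="asst_123")
#   while run.status in ("queued", "in_progress"):
#       time.sleep(1)  # poll until the run settles
#       run = client.beta.threads.runs.retrieve(thread_id="thread_123", run_id=run.id)
#
#   if run.status == "requires_action":
#       calls = run.required_action.submit_tool_outputs.tool_calls
#       # all outputs must go back in a single request, as noted in the summary above
#       outputs = [{"tool_call_id": c.id, "output": "70 degrees and sunny."} for c in calls]
#       run = client.beta.threads.runs.submit_tool_outputs(
#           thread_id="thread_123", run_id=run.id, tool_outputs=outputs
#       )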
- examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075592,\n \"assistant_id\": \"asst_123\",\n \"thread_id\": \"thread_123\",\n \"status\": \"queued\",\n \"started_at\": 1699075592,\n \"expires_at\": 1699076192,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.step.completed\ndata: 
{\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710352449,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352475,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"id\":\"call_iWr0kQ2EaYMaxNdl0v3KYkx7\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\",\"output\":\"70 degrees and sunny.\"}}]},\"usage\":{\"prompt_tokens\":291,\"completion_tokens\":24,\"total_tokens\":315}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":1710352448,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710352475,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"The\",\"annotations\":[]}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" current\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" weather\"}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" sunny\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\".\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710352477,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"The current weather in San Francisco, CA is 70 degrees Fahrenheit and 
sunny.\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352477,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":{\"prompt_tokens\":329,\"completion_tokens\":18,\"total_tokens\":347}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710352475,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710352477,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs/{run_id}/cancel': - post: - tags: - - Assistants - summary: Cancels a run that is `in_progress`. - operationId: cancelRun - parameters: - - name: thread_id - in: path - description: The ID of the thread to which this run belongs. - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to cancel. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Cancel a run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.cancel(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.cancel(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076126,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"cancelling\",\n \"started_at\": 1699076126,\n \"expires_at\": 1699076726,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You summarize books.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - '/threads/{thread_id}/runs/{run_id}/steps': - get: - tags: - - Assistants - summary: Returns a list of run steps belonging to a run. - operationId: listRunSteps - parameters: - - name: thread_id - in: path - description: The ID of the thread the run and run steps belong to. - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run the run steps belong to. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListRunStepsResponse' - x-oaiMeta: - name: List run steps - group: threads - beta: true - returns: 'A list of [run step](/docs/api-reference/runs/step-object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_steps = client.beta.threads.runs.steps.list(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run_steps)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.list(\n \"thread_abc123\",\n \"run_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n }\n ],\n \"first_id\": \"step_abc123\",\n \"last_id\": \"step_abc456\",\n \"has_more\": false\n}\n" - '/threads/{thread_id}/runs/{run_id}/steps/{step_id}': - get: - tags: - - Assistants - summary: Retrieves a run step. - operationId: getRunStep - parameters: - - name: thread_id - in: path - description: The ID of the thread to which the run and run step belongs. - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to which the run step belongs. - required: true - schema: - type: string - - name: step_id - in: path - description: The ID of the run step to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunStepObject' - x-oaiMeta: - name: Retrieve run step - group: threads - beta: true - returns: 'The [run step](/docs/api-reference/runs/step-object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_step = client.beta.threads.runs.steps.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n step_id=\"step_abc123\"\n)\n\nprint(run_step)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.retrieve(\n \"thread_abc123\",\n \"run_abc123\",\n \"step_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" - response: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" - /vector_stores: - get: - tags: - - Vector Stores - summary: Returns a list of vector stores. - operationId: listVectorStores - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListVectorStoresResponse' - x-oaiMeta: - name: List vector stores - group: vector_stores - beta: true - returns: 'A list of [vector store](/docs/api-reference/vector-stores/object) objects.' 
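# --- Editorial sketch (not part of the spec): cursor pagination.
# The `limit`, `after`, and `before` query parameters described above, together with
# the `first_id` / `last_id` / `has_more` fields shown in the list response examples
# in this file, form a cursor-style pager. A minimal loop, assuming (this is an
# assumption about the SDK surface, not taken from this spec) that the Python
# client's vector_stores.list() accepts these parameters and exposes the response
# fields as attributes:
#
#   from openai import OpenAI
#
#   client = OpenAI()
#
#   params = {"limit": 20}
#   while True:
#       page = client.beta.vector_stores.list(**params)
#       for vs in page.data:
#           print(vs.id, vs.name)
#       if not page.has_more:
#           break
#       # resume after the last object of the previous page
#       params["after"] = page.last_id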
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_stores = client.beta.vector_stores.list()\nprint(vector_stores)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStores = await openai.beta.vectorStores.list();\n console.log(vectorStores);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n },\n {\n \"id\": \"vs_abc456\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ v2\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n }\n ],\n \"first_id\": \"vs_abc123\",\n \"last_id\": \"vs_abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Vector Stores - summary: Create a vector store. - operationId: createVectorStore - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreObject' - x-oaiMeta: - name: Create vector store - group: vector_stores - beta: true - returns: 'A [vector store](/docs/api-reference/vector-stores/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.create(\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.create({\n name: \"Support FAQ\"\n });\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" - '/vector_stores/{vector_store_id}': - get: - tags: - - Vector Stores - summary: Retrieves a vector store. - operationId: getVectorStore - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreObject' - x-oaiMeta: - name: Retrieve vector store - group: vector_stores - beta: true - returns: 'The [vector store](/docs/api-reference/vector-stores/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.retrieve(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.retrieve(\n \"vs_abc123\"\n );\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776\n}\n" - post: - tags: - - Vector Stores - summary: Modifies a vector store. - operationId: modifyVectorStore - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to modify. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/UpdateVectorStoreRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreObject' - x-oaiMeta: - name: Modify vector store - group: vector_stores - beta: true - returns: 'The modified [vector store](/docs/api-reference/vector-stores/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.update(\n vector_store_id=\"vs_abc123\",\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.update(\n \"vs_abc123\",\n {\n name: \"Support FAQ\"\n }\n );\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" - delete: - tags: - - Vector Stores - summary: Delete a vector store. - operationId: deleteVectorStore - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to delete. 
- required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteVectorStoreResponse' - x-oaiMeta: - name: Delete vector store - group: vector_stores - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store = client.beta.vector_stores.delete(\n vector_store_id=\"vs_abc123\"\n)\nprint(deleted_vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStore = await openai.beta.vectorStores.del(\n \"vs_abc123\"\n );\n console.log(deletedVectorStore);\n}\n\nmain();\n" - response: "{\n id: \"vs_abc123\",\n object: \"vector_store.deleted\",\n deleted: true\n}\n" - '/vector_stores/{vector_store_id}/files': - get: - tags: - - Vector Stores - summary: Returns a list of vector store files. - operationId: listVectorStoreFiles - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the files belong to. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: filter - in: query - description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' - schema: - enum: - - in_progress - - completed - - failed - - cancelled - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListVectorStoreFilesResponse' - x-oaiMeta: - name: List vector store files - group: vector_stores - beta: true - returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.files.list(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store_files)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.files.list(\n \"vs_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Vector Stores - summary: 'Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).' - operationId: createVectorStoreFile - parameters: - - name: vector_store_id - in: path - description: "The ID of the vector store for which to create a File.\n" - required: true - schema: - type: string - example: vs_abc123 - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreFileRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileObject' - x-oaiMeta: - name: Create vector store file - group: vector_stores - beta: true - returns: 'A [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_id\": \"file-abc123\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.create(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFile = await openai.beta.vectorStores.files.create(\n \"vs_abc123\",\n {\n file_id: \"file-abc123\"\n }\n );\n console.log(myVectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"usage_bytes\": 1234,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" - '/vector_stores/{vector_store_id}/files/{file_id}': - get: - tags: - - Vector Stores - summary: Retrieves a vector store file. - operationId: getVectorStoreFile - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file belongs to. - required: true - schema: - type: string - example: vs_abc123 - - name: file_id - in: path - description: The ID of the file being retrieved. 
- required: true - schema: - type: string - example: file-abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileObject' - x-oaiMeta: - name: Retrieve vector store file - group: vector_stores - beta: true - returns: 'The [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.retrieve(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFile = await openai.beta.vectorStores.files.retrieve(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(vectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" - delete: - tags: - - Vector Stores - summary: 'Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.' - operationId: deleteVectorStoreFile - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file belongs to. - required: true - schema: - type: string - - name: file_id - in: path - description: The ID of the file to delete. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteVectorStoreFileResponse' - x-oaiMeta: - name: Delete vector store file - group: vector_stores - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file = client.beta.vector_stores.files.delete(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(deleted_vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFile = await openai.beta.vectorStores.files.del(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(deletedVectorStoreFile);\n}\n\nmain();\n" - response: "{\n id: \"file-abc123\",\n object: \"vector_store.file.deleted\",\n deleted: true\n}\n" - '/vector_stores/{vector_store_id}/file_batches': - post: - tags: - - Vector Stores - summary: Create a vector store file batch. 
- operationId: createVectorStoreFileBatch - parameters: - - name: vector_store_id - in: path - description: "The ID of the vector store for which to create a File Batch.\n" - required: true - schema: - type: string - example: vs_abc123 - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreFileBatchRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' - x-oaiMeta: - name: Create vector store file batch - group: vector_stores - beta: true - returns: 'A [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_ids\": [\"file-abc123\", \"file-abc456\"]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.create(\n vector_store_id=\"vs_abc123\",\n file_ids=[\"file-abc123\", \"file-abc456\"]\n)\nprint(vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(\n \"vs_abc123\",\n {\n file_ids: [\"file-abc123\", \"file-abc456\"]\n }\n );\n console.log(myVectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 2\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}': - get: - tags: - - Vector Stores - summary: Retrieves a vector store file batch. - operationId: getVectorStoreFileBatch - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file batch belongs to. - required: true - schema: - type: string - example: vs_abc123 - - name: batch_id - in: path - description: The ID of the file batch being retrieved. - required: true - schema: - type: string - example: vsfb_abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' - x-oaiMeta: - name: Retrieve vector store file batch - group: vector_stores - beta: true - returns: 'The [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.'
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.retrieve(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 2\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel': - post: - tags: - - Vector Stores - summary: Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. - operationId: cancelVectorStoreFileBatch - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file batch belongs to. - required: true - schema: - type: string - - name: batch_id - in: path - description: The ID of the file batch to cancel. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' - x-oaiMeta: - name: Cancel vector store file batch - group: vector_stores - beta: true - returns: The modified vector store file batch object. - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(deleted_vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.cancel(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(deletedVectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"cancelling\",\n \"file_counts\": {\n \"in_progress\": 12,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 15\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}/files': - get: - tags: - - Vector Stores - summary: Returns a list of vector store files in a batch. - operationId: listFilesInVectorStoreBatch - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the files belong to. - required: true - schema: - type: string - - name: batch_id - in: path - description: The ID of the file batch that the files belong to.
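The file-batch operations above are usually combined into a create-then-poll loop. Below is a short sketch using the `file_batches.create` and `file_batches.retrieve` calls from the embedded examples; the IDs are placeholders and the polling interval is an arbitrary choice.

    import time
    from openai import OpenAI

    client = OpenAI()

    # Create a batch that attaches several already-uploaded files at once
    # (createVectorStoreFileBatch).
    batch = client.beta.vector_stores.file_batches.create(
        vector_store_id="vs_abc123",
        file_ids=["file-abc123", "file-abc456"],
    )

    # Poll with getVectorStoreFileBatch until the batch leaves "in_progress".
    while batch.status == "in_progress":
        time.sleep(5)
        batch = client.beta.vector_stores.file_batches.retrieve(
            vector_store_id="vs_abc123",
            batch_id=batch.id,
        )

    print(batch.status, batch.file_counts)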
- required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: filter - in: query - description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' - schema: - enum: - - in_progress - - completed - - failed - - cancelled - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListVectorStoreFilesResponse' - x-oaiMeta: - name: List vector store files in a batch - group: vector_stores - beta: true - returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.file_batches.list_files(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_files)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" - /batches: - post: - tags: - - Batch - summary: Creates and executes a batch from an uploaded file of requests - operationId: createBatch - requestBody: - content: - application/json: - schema: - required: - - input_file_id - - endpoint - - completion_window - type: object - properties: - input_file_id: - type: string - description: "The ID of an uploaded file that contains requests for the new batch.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`.
The file can contain up to 50,000 requests, and can be up to 100 MB in size.\n" - endpoint: - enum: - - /v1/chat/completions - - /v1/embeddings - - /v1/completions - type: string - description: 'The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch.' - completion_window: - enum: - - 24h - type: string - description: The time frame within which the batch should be processed. Currently only `24h` is supported. - metadata: - type: object - additionalProperties: - type: string - description: Optional custom metadata for the batch. - nullable: true - required: true - responses: - '200': - description: Batch created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Batch' - x-oaiMeta: - name: Create batch - group: batch - returns: 'The created [Batch](/docs/api-reference/batch/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input_file_id\": \"file-abc123\",\n \"endpoint\": \"/v1/chat/completions\",\n \"completion_window\": \"24h\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.create(\n input_file_id=\"file-abc123\",\n endpoint=\"/v1/chat/completions\",\n completion_window=\"24h\"\n)\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.create({\n input_file_id: \"file-abc123\",\n endpoint: \"/v1/chat/completions\",\n completion_window: \"24h\"\n });\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"validating\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": null,\n \"expires_at\": null,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 0,\n \"completed\": 0,\n \"failed\": 0\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - get: - tags: - - Batch - summary: List your organization's batches. - operationId: listBatches - parameters: - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - responses: - '200': - description: Batch listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ListBatchesResponse' - x-oaiMeta: - name: List batch - group: batch - returns: 'A list of paginated [Batch](/docs/api-reference/batch/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/batches?limit=2 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.list()\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.batches.list();\n\n for await (const batch of list) {\n console.log(batch);\n }\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly job\",\n }\n },\n { ... },\n ],\n \"first_id\": \"batch_abc123\",\n \"last_id\": \"batch_abc456\",\n \"has_more\": true\n}\n" - '/batches/{batch_id}': - get: - tags: - - Batch - summary: Retrieves a batch. - operationId: retrieveBatch - parameters: - - name: batch_id - in: path - description: The ID of the batch to retrieve. - required: true - schema: - type: string - responses: - '200': - description: Batch retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Batch' - x-oaiMeta: - name: Retrieve batch - group: batch - returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/batches/batch_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.retrieve(\"batch_abc123\")\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.retrieve(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - '/batches/{batch_id}/cancel': - post: - tags: - - Batch - summary: 'Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file.' 
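The Batch endpoints are typically driven as create, then poll, then read the output and error file IDs. Below is a condensed sketch built from the `client.batches` calls in the examples above; the input file ID is a placeholder and the set of terminal statuses is inferred from the `*_at` timestamp fields in the example responses.

    import time
    from openai import OpenAI

    client = OpenAI()

    # Create a batch from a previously uploaded JSONL file of /v1/chat/completions requests.
    batch = client.batches.create(
        input_file_id="file-abc123",       # placeholder: JSONL uploaded with purpose "batch"
        endpoint="/v1/chat/completions",
        completion_window="24h",
    )

    # Poll until the batch reaches a terminal state (names inferred from the
    # completed_at / failed_at / expired_at / cancelled_at fields above).
    TERMINAL = {"completed", "failed", "expired", "cancelled"}
    while batch.status not in TERMINAL:
        time.sleep(60)
        batch = client.batches.retrieve(batch.id)

    print(batch.status, batch.request_counts)
    print("output_file_id:", batch.output_file_id, "error_file_id:", batch.error_file_id)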
- operationId: cancelBatch - parameters: - - name: batch_id - in: path - description: The ID of the batch to cancel. - required: true - schema: - type: string - responses: - '200': - description: Batch is cancelling. Returns the cancelling batch's details. - content: - application/json: - schema: - $ref: '#/components/schemas/Batch' - x-oaiMeta: - name: Cancel batch - group: batch - returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/batches/batch_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.cancel(\"batch_abc123\")\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.cancel(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"cancelling\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": 1711475133,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 23,\n \"failed\": 1\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - /organization/audit_logs: - get: - tags: - - Audit Logs - summary: List user actions and configuration changes within this organization. - operationId: list-audit-logs - parameters: - - name: effective_at - in: query - description: Return only events whose `effective_at` (Unix seconds) is in this range. - schema: - type: object - properties: - gt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than this value. - gte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. - lt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than this value. - lte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. - - name: 'project_ids[]' - in: query - description: Return only events for these projects. - schema: - type: array - items: - type: string - - name: 'event_types[]' - in: query - description: 'Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object).' - schema: - type: array - items: - $ref: '#/components/schemas/AuditLogEventType' - - name: 'actor_ids[]' - in: query - description: 'Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID.' - schema: - type: array - items: - type: string - - name: 'actor_emails[]' - in: query - description: Return only events performed by users with these emails. - schema: - type: array - items: - type: string - - name: 'resource_ids[]' - in: query - description: 'Return only events performed on these targets. For example, a project ID updated.' 
- schema: - type: array - items: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: Audit logs listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ListAuditLogsResponse' - x-oaiMeta: - name: List audit logs - group: audit-logs - returns: 'A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/audit_logs \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\" \\\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"audit_log-xxx_yyyymmdd\",\n \"type\": \"project.archived\",\n \"effective_at\": 1722461446,\n \"actor\": {\n \"type\": \"api_key\",\n \"api_key\": {\n \"type\": \"user\",\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n }\n }\n },\n \"project.archived\": {\n \"id\": \"proj_abc\"\n },\n },\n {\n \"id\": \"audit_log-yyy__20240101\",\n \"type\": \"api_key.updated\",\n \"effective_at\": 1720804190,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.updated\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource_2.operation_2\"]\n }\n },\n }\n ],\n \"first_id\": \"audit_log-xxx__20240101\",\n \"last_id\": \"audit_log_yyy__20240101\",\n \"has_more\": true\n}\n" - /organization/invites: - get: - tags: - - Invites - summary: Returns a list of invites in the organization. - operationId: list-invites - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Invites listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/InviteListResponse' - x-oaiMeta: - name: List invites - group: administration - returns: 'A list of [Invite](/docs/api-reference/invite/object) objects.' 
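The organization endpoints in this spec ship only curl examples. Below is a hedged sketch of the same audit-log listing using plain `requests`, following the documented `after` cursor; the event type and page size are arbitrary choices, and an admin key is assumed in `OPENAI_ADMIN_KEY`.

    import os
    import requests

    HEADERS = {
        "Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
        "Content-Type": "application/json",
    }

    # Page through audit log events of one type, following the `after` cursor.
    params = {"event_types[]": "project.archived", "limit": 20}
    while True:
        resp = requests.get(
            "https://api.openai.com/v1/organization/audit_logs",
            headers=HEADERS,
            params=params,
            timeout=30,
        )
        resp.raise_for_status()
        page = resp.json()
        for event in page["data"]:
            print(event["id"], event["type"], event["effective_at"])
        if not page.get("has_more"):
            break
        params["after"] = page["last_id"]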
- examples: - request: - curl: "curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n }\n ],\n \"first_id\": \"invite-abc\",\n \"last_id\": \"invite-abc\",\n \"has_more\": false\n}\n" - post: - tags: - - Invites - summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. - operationId: inviteUser - requestBody: - description: The invite request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/InviteRequest' - required: true - responses: - '200': - description: User invited successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Invite' - x-oaiMeta: - name: Create invite - group: administration - returns: 'The created [Invite](/docs/api-reference/invite/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/invites \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"email\": \"user@example.com\",\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": null\n}\n" - '/organization/invites/{invite_id}': - get: - tags: - - Invites - summary: Retrieves an invite. - operationId: retrieve-invite - parameters: - - name: invite_id - in: path - description: The ID of the invite to retrieve. - required: true - schema: - type: string - responses: - '200': - description: Invite retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Invite' - x-oaiMeta: - name: Retrieve invite - group: administration - returns: 'The [Invite](/docs/api-reference/invite/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" - delete: - tags: - - Invites - summary: 'Delete an invite. If the invite has already been accepted, it cannot be deleted.' - operationId: delete-invite - parameters: - - name: invite_id - in: path - description: The ID of the invite to delete. - required: true - schema: - type: string - responses: - '200': - description: Invite deleted successfully. 
- content: - application/json: - schema: - $ref: '#/components/schemas/InviteDeleteResponse' - x-oaiMeta: - name: Delete invite - group: administration - returns: Confirmation that the invite has been deleted - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.invite.deleted\",\n \"id\": \"invite-abc\",\n \"deleted\": true\n} \n" - /organization/users: - get: - tags: - - Users - summary: Lists all of the users in the organization. - operationId: list-users - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Users listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/UserListResponse' - x-oaiMeta: - name: List users - group: administration - returns: 'A list of [User](/docs/api-reference/users/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" - '/organization/users/{user_id}': - get: - tags: - - Users - summary: Retrieves a user by their identifier. - operationId: retrieve-user - parameters: - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: User retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Retrieve user - group: administration - returns: 'The [User](/docs/api-reference/users/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - post: - tags: - - Users - summary: Modifies a user's role in the organization. - operationId: modify-user - requestBody: - description: The new user role to modify. This must be one of `owner` or `member`. - content: - application/json: - schema: - $ref: '#/components/schemas/UserRoleUpdateRequest' - required: true - responses: - '200': - description: User role updated successfully. 
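A sketch of the invite flow with `requests`: send an invite, list invites to check their status, and, once the user has joined the organization, change their role (`owner` or `member`). Paths, headers, and payload fields come from the curl examples above; the email address and user ID are placeholders.

    import os
    import requests

    BASE = "https://api.openai.com/v1/organization"
    HEADERS = {
        "Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
        "Content-Type": "application/json",
    }

    # Invite a user to the organization (inviteUser).
    invite = requests.post(
        f"{BASE}/invites",
        headers=HEADERS,
        json={"email": "user@example.com", "role": "owner"},
        timeout=30,
    ).json()
    print(invite["id"], invite.get("accepted_at"))

    # List invites to check their status (list-invites).
    invites = requests.get(f"{BASE}/invites", headers=HEADERS,
                           params={"limit": 20}, timeout=30).json()
    for inv in invites["data"]:
        print(inv["email"], inv["status"])

    # After acceptance, change the member's role (modify-user); user ID is a placeholder.
    updated = requests.post(
        f"{BASE}/users/user_abc",
        headers=HEADERS,
        json={"role": "owner"},
        timeout=30,
    ).json()
    print(updated["role"])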
- content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Modify user - group: administration - returns: 'The updated [User](/docs/api-reference/users/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - delete: - tags: - - Users - summary: Deletes a user from the organization. - operationId: delete-user - parameters: - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: User deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/UserDeleteResponse' - x-oaiMeta: - name: Delete user - group: administration - returns: Confirmation of the deleted user - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n} \n" - /organization/projects: - get: - tags: - - Projects - summary: Returns a list of projects. - operationId: list-projects - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: include_archived - in: query - description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. - schema: - type: boolean - default: false - responses: - '200': - description: Projects listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectListResponse' - x-oaiMeta: - name: List projects - group: administration - returns: 'A list of [Project](/docs/api-reference/projects/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n }\n ],\n \"first_id\": \"proj-abc\",\n \"last_id\": \"proj-xyz\",\n \"has_more\": false\n}\n" - post: - tags: - - Projects - summary: 'Create a new project in the organization. Projects can be created and archived, but cannot be deleted.' - operationId: create-project - requestBody: - description: The project create request payload. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ProjectCreateRequest' - required: true - responses: - '200': - description: Project created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Create project - group: administration - returns: 'The created [Project](/docs/api-reference/projects/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project ABC\"\n }'\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project ABC\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - '/organization/projects/{project_id}': - get: - tags: - - Projects - summary: Retrieves a project. - operationId: retrieve-project - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - responses: - '200': - description: Project retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Retrieve project - group: administration - description: Retrieve a project. - returns: 'The [Project](/docs/api-reference/projects/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - post: - tags: - - Projects - summary: Modifies a project in the organization. - operationId: modify-project - requestBody: - description: The project update request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUpdateRequest' - required: true - responses: - '200': - description: Project updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - '400': - description: Error response when updating the default project. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Modify project - group: administration - returns: 'The updated [Project](/docs/api-reference/projects/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project DEF\"\n }'\n" - '/organization/projects/{project_id}/archive': - post: - tags: - - Projects - summary: Archives a project in the organization. Archived projects cannot be used or updated. - operationId: archive-project - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - responses: - '200': - description: Project archived successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Archive project - group: administration - returns: 'The archived [Project](/docs/api-reference/projects/object) object.' 
- examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project DEF\",\n \"created_at\": 1711471533,\n \"archived_at\": 1711471533,\n \"status\": \"archived\"\n}\n" - '/organization/projects/{project_id}/users': - get: - tags: - - Projects - summary: Returns a list of users in the project. - operationId: list-project-users - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Project users listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserListResponse' - '400': - description: Error response when project is archived. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: List project users - group: administration - returns: 'A list of [ProjectUser](/docs/api-reference/project-users/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - post: - tags: - - Projects - summary: Adds a user to the project. Users must already be members of the organization to be added to a project. - operationId: create-project-user - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - requestBody: - description: The project user create request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserCreateRequest' - required: true - responses: - '200': - description: User added to project successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - '400': - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Create project user - group: administration - returns: 'The created [ProjectUser](/docs/api-reference/project-users/object) object.' 
- examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"user_id\": \"user_abc\",\n \"role\": \"member\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - '/organization/projects/{project_id}/users/{user_id}': - get: - tags: - - Projects - summary: Retrieves a user in the project. - operationId: retrieve-project-user - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: Project user retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - x-oaiMeta: - name: Retrieve project user - group: administration - returns: 'The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - post: - tags: - - Projects - summary: Modifies a user's role in the project. - operationId: modify-project-user - requestBody: - description: The project user update request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserUpdateRequest' - required: true - responses: - '200': - description: Project user's role updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - '400': - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Modify project user - group: administration - returns: 'The updated [ProjectUser](/docs/api-reference/project-users/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - delete: - tags: - - Projects - summary: Deletes a user from the project. - operationId: delete-project-user - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: Project user deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserDeleteResponse' - '400': - description: Error response for various conditions. 
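Combining the project endpoints above: create a project, add an existing organization member to it, then archive it when finished. Payloads mirror the curl examples; the IDs are placeholders, and archived projects return the 400 error shown in the error examples.

    import os
    import requests

    BASE = "https://api.openai.com/v1/organization"
    HEADERS = {
        "Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
        "Content-Type": "application/json",
    }

    # Create a project (create-project).
    project = requests.post(f"{BASE}/projects", headers=HEADERS,
                            json={"name": "Project ABC"}, timeout=30).json()
    project_id = project["id"]

    # Add an existing organization member to the project (create-project-user).
    member = requests.post(f"{BASE}/projects/{project_id}/users", headers=HEADERS,
                           json={"user_id": "user_abc", "role": "member"}, timeout=30).json()
    print(member["id"], member["role"])

    # Archive the project when it is no longer needed (archive-project).
    archived = requests.post(f"{BASE}/projects/{project_id}/archive",
                             headers=HEADERS, timeout=30).json()
    print(archived["status"])  # "archived"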
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Delete project user - group: administration - returns: 'Confirmation that project has been deleted or an error in case of an archived project, which has no users' - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" - '/organization/projects/{project_id}/service_accounts': - get: - tags: - - Projects - summary: Returns a list of service accounts in the project. - operationId: list-project-service-accounts - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Project service accounts listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountListResponse' - '400': - description: Error response when project is archived. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: List project service accounts - group: administration - returns: 'A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n ],\n \"first_id\": \"svc_acct_abc\",\n \"last_id\": \"svc_acct_xyz\",\n \"has_more\": false\n}\n" - post: - tags: - - Projects - summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. - operationId: create-project-service-account - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - requestBody: - description: The project service account create request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' - required: true - responses: - '200': - description: Project service account created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' - '400': - description: Error response when project is archived. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Create project service account - group: administration - returns: 'The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Production App\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Production App\",\n \"role\": \"member\",\n \"created_at\": 1711471533,\n \"api_key\": {\n \"object\": \"organization.project.service_account.api_key\",\n \"value\": \"sk-abcdefghijklmnop123\",\n \"name\": \"Secret Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\"\n }\n}\n" - '/organization/projects/{project_id}/service_accounts/{service_account_id}': - get: - tags: - - Projects - summary: Retrieves a service account in the project. - operationId: retrieve-project-service-account - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: service_account_id - in: path - description: The ID of the service account. - required: true - schema: - type: string - responses: - '200': - description: Project service account retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccount' - x-oaiMeta: - name: Retrieve project service account - group: administration - returns: 'The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" - delete: - tags: - - Projects - summary: Deletes a service account from the project. - operationId: delete-project-service-account - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: service_account_id - in: path - description: The ID of the service account. - required: true - schema: - type: string - responses: - '200': - description: Project service account deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' - x-oaiMeta: - name: Delete project service account - group: administration - returns: 'Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts' - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.service_account.deleted\",\n \"id\": \"svc_acct_abc\",\n \"deleted\": true\n}\n" - '/organization/projects/{project_id}/api_keys': - get: - tags: - - Projects - summary: Returns a list of API keys in the project. 
- operationId: list-project-api-keys - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Project API keys listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKeyListResponse' - x-oaiMeta: - name: List project API keys - group: administration - returns: 'A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n }\n ],\n \"first_id\": \"key_abc\",\n \"last_id\": \"key_xyz\",\n \"has_more\": false\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - '/organization/projects/{project_id}/api_keys/{key_id}': - get: - tags: - - Projects - summary: Retrieves an API key in the project. - operationId: retrieve-project-api-key - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: key_id - in: path - description: The ID of the API key. - required: true - schema: - type: string - responses: - '200': - description: Project API key retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKey' - x-oaiMeta: - name: Retrieve project API key - group: administration - returns: 'The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" - delete: - tags: - - Projects - summary: Deletes an API key from the project. - operationId: delete-project-api-key - parameters: - - name: project_id - in: path - description: The ID of the project. 
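A final sketch covering project service accounts and project API keys with `requests`: create a service account, whose response includes an unredacted API key, then list the project's keys, which only expose `redacted_value`. The project ID and account name are placeholders.

    import os
    import requests

    BASE = "https://api.openai.com/v1/organization/projects/proj_abc"   # placeholder project
    HEADERS = {
        "Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
        "Content-Type": "application/json",
    }

    # Create a service account; the response carries an unredacted API key
    # (create-project-service-account).
    svc = requests.post(f"{BASE}/service_accounts", headers=HEADERS,
                        json={"name": "Production App"}, timeout=30).json()
    print(svc["id"], svc["api_key"]["value"])   # store this value securely

    # List the project's API keys; only redacted values are returned here
    # (list-project-api-keys).
    keys = requests.get(f"{BASE}/api_keys", headers=HEADERS,
                        params={"limit": 20}, timeout=30).json()
    for key in keys["data"]:
        print(key["id"], key["redacted_value"], key["owner"]["type"])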
- required: true - schema: - type: string - - name: key_id - in: path - description: The ID of the API key. - required: true - schema: - type: string - responses: - '200': - description: Project API key deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' - '400': - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Delete project API key - group: administration - returns: Confirmation of the key's deletion or an error if the key belonged to a service account - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.api_key.deleted\",\n \"id\": \"key_abc\",\n \"deleted\": true\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"API keys cannot be deleted for service accounts, please delete the service account\"\n} \n" -components: - schemas: - Error: - required: - - type - - message - - param - - code - type: object - properties: - code: - type: string - nullable: true - message: - type: string - param: - type: string - nullable: true - type: - type: string - ErrorResponse: - required: - - error - type: object - properties: - error: - $ref: '#/components/schemas/Error' - ListModelsResponse: - required: - - object - - data - type: object - properties: - object: - enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/Model' - DeleteModelResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - CreateCompletionRequest: - required: - - model - - prompt - type: object - properties: - model: - anyOf: - - type: string - - enum: - - gpt-3.5-turbo-instruct - - davinci-002 - - babbage-002 - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - x-oaiTypeLabel: string - prompt: - oneOf: - - type: string - default: '' - example: This is a test. - - type: array - items: - type: string - default: '' - example: This is a test. - - minItems: 1 - type: array - items: - type: integer - example: '[1212, 318, 257, 1332, 13]' - - minItems: 1 - type: array - items: - minItems: 1 - type: array - items: - type: integer - example: '[[1212, 318, 257, 1332, 13]]' - description: "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n" - default: <|endoftext|> - nullable: true - best_of: - maximum: 20 - minimum: 0 - type: integer - description: "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). 
Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" - default: 1 - nullable: true - echo: - type: boolean - description: "Echo back the prompt in addition to the completion\n" - default: false - nullable: true - frequency_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - logit_bias: - type: object - additionalProperties: - type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n" - default: - nullable: true - x-oaiTypeLabel: map - logprobs: - maximum: 5 - minimum: 0 - type: integer - description: "Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n" - default: - nullable: true - max_tokens: - minimum: 0 - type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - default: 16 - nullable: true - example: 16 - n: - maximum: 128 - minimum: 1 - type: integer - description: "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" - default: 1 - nullable: true - example: 1 - presence_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - seed: - maximum: 9223372036854775807 - minimum: -9223372036854775808 - type: integer - description: "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" - nullable: true - stop: - oneOf: - - type: string - default: <|endoftext|> - nullable: true - example: "\n" - - maxItems: 4 - minItems: 1 - type: array - items: - type: string - example: '["\n"]' - description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.\n" - default: - nullable: true - stream: - type: boolean - description: "Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" - default: false - nullable: true - stream_options: - $ref: '#/components/schemas/ChatCompletionStreamOptions' - suffix: - type: string - description: "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n" - default: - nullable: true - example: test. - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" - default: 1 - nullable: true - example: 1 - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - CreateCompletionResponse: - required: - - id - - object - - created - - model - - choices - type: object - properties: - id: - type: string - description: A unique identifier for the completion. - choices: - type: array - items: - required: - - finish_reason - - index - - logprobs - - text - type: object - properties: - finish_reason: - enum: - - stop - - length - - content_filter - type: string - description: "The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\nor `content_filter` if content was omitted due to a flag from our content filters.\n" - index: - type: integer - logprobs: - type: object - properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: - type: array - items: - type: string - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: number - nullable: true - text: - type: string - description: The list of completion choices the model generated for the input prompt. - created: - type: integer - description: The Unix timestamp (in seconds) of when the completion was created. - model: - type: string - description: The model used for completion. - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - text_completion - type: string - description: 'The object type, which is always "text_completion"' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: "Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).\n" - x-oaiMeta: - name: The completion object - legacy: true - example: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"gpt-4-turbo\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" - ChatCompletionRequestMessageContentPartText: - title: Text content part - required: - - type - - text - type: object - properties: - type: - enum: - - text - type: string - description: The type of the content part. - text: - type: string - description: The text content. - ChatCompletionRequestMessageContentPartImage: - title: Image content part - required: - - type - - image_url - type: object - properties: - type: - enum: - - image_url - type: string - description: The type of the content part. - image_url: - required: - - url - type: object - properties: - url: - type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).' - default: auto - ChatCompletionRequestMessageContentPartRefusal: - title: Refusal content part - required: - - type - - refusal - type: object - properties: - type: - enum: - - refusal - type: string - description: The type of the content part. - refusal: - type: string - description: The refusal message generated by the model. 
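The content-part schemas above (text, image_url with a `detail` level, refusal) are what the SDKs serialize when an array is passed for `content` instead of a plain string. A minimal sketch with the official Python SDK, combining a text part and an `image_url` part; the image URL is a placeholder:

```python
# Sketch only: a user message built from content parts (text + image_url).
# The image URL is a placeholder; `detail` may be "auto", "low", or "high".
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://example.com/boardwalk.png",
                        "detail": "low",
                    },
                },
            ],
        }
    ],
)
print(completion.choices[0].message.content)
```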
- ChatCompletionRequestMessage: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - ChatCompletionRequestSystemMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - x-oaiExpandable: true - ChatCompletionRequestUserMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' - x-oaiExpandable: true - ChatCompletionRequestAssistantMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' - x-oaiExpandable: true - ChatCompletionRequestToolMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - x-oaiExpandable: true - ChatCompletionRequestSystemMessage: - title: System message - required: - - content - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The contents of the system message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestSystemMessageContentPart' - description: 'An array of content parts with a defined type. For system messages, only type `text` is supported.' - description: The contents of the system message. - role: - enum: - - system - type: string - description: 'The role of the messages author, in this case `system`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - ChatCompletionRequestUserMessage: - title: User message - required: - - content - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The text contents of the message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestUserMessageContentPart' - description: 'An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model.' - description: "The contents of the user message.\n" - x-oaiExpandable: true - role: - enum: - - user - type: string - description: 'The role of the messages author, in this case `user`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - ChatCompletionRequestAssistantMessage: - title: Assistant message - required: - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The contents of the assistant message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessageContentPart' - description: 'An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.' 
- description: "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.\n" - nullable: true - refusal: - type: string - description: The refusal message by the assistant. - nullable: true - role: - enum: - - assistant - type: string - description: 'The role of the messages author, in this case `assistant`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCalls' - function_call: - required: - - arguments - - name - type: object - properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - nullable: true - deprecated: true - FineTuneChatCompletionRequestAssistantMessage: - required: - - role - allOf: - - title: Assistant message - type: object - properties: - weight: - enum: - - 0 - - 1 - type: integer - description: Controls whether the assistant message is trained against (0 or 1) - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - ChatCompletionRequestToolMessage: - title: Tool message - required: - - role - - content - - tool_call_id - type: object - properties: - role: - enum: - - tool - type: string - description: 'The role of the messages author, in this case `tool`.' - content: - oneOf: - - title: Text content - type: string - description: The contents of the tool message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestToolMessageContentPart' - description: 'An array of content parts with a defined type. For tool messages, only type `text` is supported.' - description: The contents of the tool message. - tool_call_id: - type: string - description: Tool call that this message is responding to. - ChatCompletionRequestFunctionMessage: - title: Function message - required: - - role - - content - - name - type: object - properties: - role: - enum: - - function - type: string - description: 'The role of the messages author, in this case `function`.' - content: - type: string - description: The contents of the function message. - nullable: true - name: - type: string - description: The name of the function to call. - deprecated: true - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." - ChatCompletionFunctions: - required: - - name - type: object - properties: - description: - type: string - description: 'A description of what the function does, used by the model to choose when and how to call the function.' - name: - type: string - description: 'The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - parameters: - $ref: '#/components/schemas/FunctionParameters' - deprecated: true - ChatCompletionFunctionCallOption: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n" - ChatCompletionTool: - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - $ref: '#/components/schemas/FunctionObject' - FunctionObject: - required: - - name - type: object - properties: - description: - type: string - description: 'A description of what the function does, used by the model to choose when and how to call the function.' - name: - type: string - description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - parameters: - $ref: '#/components/schemas/FunctionParameters' - strict: - type: boolean - description: 'Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](/docs/guides/function-calling).' - default: false - nullable: true - ResponseFormatText: - required: - - type - type: object - properties: - type: - enum: - - text - type: string - description: 'The type of response format being defined: `text`' - ResponseFormatJsonObject: - required: - - type - type: object - properties: - type: - enum: - - json_object - type: string - description: 'The type of response format being defined: `json_object`' - ResponseFormatJsonSchemaSchema: - type: object - description: 'The schema for the response format, described as a JSON Schema object.' - ResponseFormatJsonSchema: - required: - - type - - json_schema - type: object - properties: - type: - enum: - - json_schema - type: string - description: 'The type of response format being defined: `json_schema`' - json_schema: - required: - - type - - name - type: object - properties: - description: - type: string - description: 'A description of what the response format is for, used by the model to determine how to respond in the format.' - name: - type: string - description: 'The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - schema: - $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' - strict: - type: boolean - description: 'Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs).' - default: false - nullable: true - ChatCompletionToolChoiceOption: - oneOf: - - enum: - - none - - auto - - required - type: string - description: "`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools.\n" - - $ref: '#/components/schemas/ChatCompletionNamedToolChoice' - description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present.\n" - x-oaiExpandable: true - ChatCompletionNamedToolChoice: - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: Specifies a tool the model should use. Use to force the model to call a specific function. - ParallelToolCalls: - type: boolean - description: 'Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.' - nullable: true - ChatCompletionMessageToolCalls: - type: array - items: - $ref: '#/components/schemas/ChatCompletionMessageToolCall' - description: 'The tool calls generated by the model, such as function calls.' - ChatCompletionMessageToolCall: - required: - - id - - type - - function - type: object - properties: - id: - type: string - description: The ID of the tool call. - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - required: - - name - - arguments - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - description: The function that the model called. - ChatCompletionMessageToolCallChunk: - required: - - index - type: object - properties: - index: - type: integer - id: - type: string - description: The ID of the tool call. - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - ChatCompletionRole: - enum: - - system - - user - - assistant - - tool - - function - type: string - description: The role of the author of a message - ChatCompletionStreamOptions: - type: object - properties: - include_usage: - type: boolean - description: "If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. 
All other chunks will also include a `usage` field, but with a null value.\n" - description: "Options for streaming response. Only set this when you set `stream: true`.\n" - default: - nullable: true - ChatCompletionResponseMessage: - required: - - role - - content - - refusal - type: object - properties: - content: - type: string - description: The contents of the message. - nullable: true - refusal: - type: string - description: The refusal message generated by the model. - nullable: true - tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCalls' - role: - enum: - - assistant - type: string - description: The role of the author of this message. - function_call: - required: - - name - - arguments - type: object - properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - deprecated: true - description: A chat completion message generated by the model. - ChatCompletionStreamResponseDelta: - type: object - properties: - content: - type: string - description: The contents of the chunk message. - nullable: true - function_call: - type: object - properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - deprecated: true - tool_calls: - type: array - items: - $ref: '#/components/schemas/ChatCompletionMessageToolCallChunk' - role: - enum: - - system - - user - - assistant - - tool - type: string - description: The role of the author of this message. - refusal: - type: string - description: The refusal message generated by the model. - nullable: true - description: A chat completion delta generated by streamed model responses. - CreateChatCompletionRequest: - required: - - model - - messages - type: object - properties: - messages: - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestMessage' - description: 'A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).' - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0301 - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'ID of the model to use. 
See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.' - example: gpt-4o - x-oaiTypeLabel: string - frequency_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - logit_bias: - type: object - additionalProperties: - type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n" - default: - nullable: true - x-oaiTypeLabel: map - logprobs: - type: boolean - description: 'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.' - default: false - nullable: true - top_logprobs: - maximum: 20 - minimum: 0 - type: integer - description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.' - nullable: true - max_tokens: - type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - nullable: true - n: - maximum: 128 - minimum: 1 - type: integer - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. - default: 1 - nullable: true - example: 1 - presence_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - response_format: - oneOf: - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. 
Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" - x-oaiExpandable: true - seed: - maximum: 9223372036854775807 - minimum: -9223372036854775808 - type: integer - description: "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" - nullable: true - x-oaiMeta: - beta: true - service_tier: - enum: - - auto - - default - type: string - description: "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', the system will utilize scale tier credits until they are exhausted.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n" - default: - nullable: true - stop: - oneOf: - - type: string - nullable: true - - maxItems: 4 - minItems: 1 - type: array - items: - type: string - description: "Up to 4 sequences where the API will stop generating further tokens.\n" - default: - stream: - type: boolean - description: "If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" - default: false - nullable: true - stream_options: - $ref: '#/components/schemas/ChatCompletionStreamOptions' - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" - default: 1 - nullable: true - example: 1 - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: "A list of tools the model may call. 
Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n" - tool_choice: - $ref: '#/components/schemas/ChatCompletionToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - function_call: - oneOf: - - enum: - - none - - auto - type: string - description: "`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.\n" - - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' - description: "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. `auto` is the default if functions are present.\n" - deprecated: true - x-oaiExpandable: true - functions: - maxItems: 128 - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n" - deprecated: true - CreateChatCompletionResponse: - required: - - choices - - created - - id - - model - - object - type: object - properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - items: - required: - - finish_reason - - index - - message - type: object - properties: - finish_reason: - enum: - - stop - - length - - tool_calls - - content_filter - - function_call - type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' - logprobs: - required: - - content - - refusal - type: object - properties: - content: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message content tokens with log probability information. - nullable: true - refusal: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message refusal tokens with log probability information. - nullable: true - description: Log probability information for the choice. - nullable: true - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. 
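Putting the request-side pieces together, here is a hedged sketch of a chat completion that declares one function tool and forces it via a named `tool_choice`; the `get_weather` function and its parameter schema are illustrative only:

```python
# Sketch only: declaring a function tool and forcing it with a named tool_choice.
# The get_weather function and its parameter schema are illustrative.
from openai import OpenAI

client = OpenAI()

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Weather in Boston?"}],
    tools=tools,
    tool_choice={"type": "function", "function": {"name": "get_weather"}},
)

tool_call = completion.choices[0].message.tool_calls[0]
print(tool_call.function.name, tool_call.function.arguments)  # arguments is a JSON string
```

The returned `arguments` string is model-generated JSON, so it should be validated before the named function is actually invoked.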
- service_tier: - enum: - - scale - - default - type: string - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - nullable: true - example: scale - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - chat.completion - type: string - description: 'The object type, which is always `chat.completion`.' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: 'Represents a chat completion response returned by model, based on the provided input.' - x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - CreateChatCompletionFunctionResponse: - required: - - choices - - created - - id - - model - - object - type: object - properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - items: - required: - - finish_reason - - index - - message - - logprobs - type: object - properties: - finish_reason: - enum: - - stop - - length - - function_call - - content_filter - type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function.\n" - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - chat.completion - type: string - description: 'The object type, which is always `chat.completion`.' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: 'Represents a chat completion response returned by model, based on the provided input.' 
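For the `response_format` options described above, a minimal sketch of requesting Structured Outputs with a `json_schema` response format; the schema, its `name`, and the prompt are illustrative, and `strict: true` asks the model to follow the schema exactly:

```python
# Sketch only: requesting Structured Outputs with a json_schema response_format.
# The schema and its name are illustrative; strict mode requires required fields
# and additionalProperties: false.
import json
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-2024-08-06",
    messages=[
        {"role": "system", "content": "Extract the event details."},
        {"role": "user", "content": "Standup on Friday at 9am."},
    ],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "event",
            "strict": True,
            "schema": {
                "type": "object",
                "properties": {"title": {"type": "string"}, "day": {"type": "string"}},
                "required": ["title", "day"],
                "additionalProperties": False,
            },
        },
    },
)
print(json.loads(completion.choices[0].message.content))
```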
- x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99\n }\n}\n" - ChatCompletionTokenLogprob: - required: - - token - - logprob - - bytes - - top_logprobs - type: object - properties: - token: - type: string - description: The token. - logprob: - type: number - description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' - bytes: - type: array - items: - type: integer - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - nullable: true - top_logprobs: - type: array - items: - required: - - token - - logprob - - bytes - type: object - properties: - token: - type: string - description: The token. - logprob: - type: number - description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' - bytes: - type: array - items: - type: integer - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - nullable: true - description: 'List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.' - ListPaginatedFineTuningJobsResponse: - required: - - object - - data - - has_more - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJob' - has_more: - type: boolean - object: - enum: - - list - type: string - CreateChatCompletionStreamResponse: - required: - - choices - - created - - id - - model - - object - type: object - properties: - id: - type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: - type: array - items: - required: - - delta - - finish_reason - - index - type: object - properties: - delta: - $ref: '#/components/schemas/ChatCompletionStreamResponseDelta' - logprobs: - required: - - content - - refusal - type: object - properties: - content: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message content tokens with log probability information. - nullable: true - refusal: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message refusal tokens with log probability information. 
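The `ChatCompletionTokenLogprob` shape above is what comes back when log probabilities are requested. A brief sketch of enabling `logprobs` with `top_logprobs` and reading the per-token entries:

```python
# Sketch only: requesting token log probabilities and reading the
# ChatCompletionTokenLogprob entries (token, logprob, top_logprobs).
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello."}],
    logprobs=True,
    top_logprobs=2,  # requires logprobs=True
)

for entry in completion.choices[0].logprobs.content:
    alternatives = [(alt.token, alt.logprob) for alt in entry.top_logprobs]
    print(entry.token, entry.logprob, alternatives)
```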
- nullable: true - description: Log probability information for the choice. - nullable: true - finish_reason: - enum: - - stop - - length - - tool_calls - - content_filter - - function_call - type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" - nullable: true - index: - type: integer - description: The index of the choice in the list of choices. - description: "A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`.\n" - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - model: - type: string - description: The model used to generate the completion. - service_tier: - enum: - - scale - - default - type: string - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - nullable: true - example: scale - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - chat.completion.chunk - type: string - description: 'The object type, which is always `chat.completion.chunk`.' - usage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - description: "An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request.\nWhen present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n" - description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' 
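The streamed-chunk schema above pairs with `stream: true` and `stream_options: {"include_usage": true}`. A hedged sketch that prints content deltas and then the usage reported on the final chunk (whose `choices` array is empty):

```python
# Sketch only: streaming a chat completion and reading the final usage chunk.
# With include_usage, the last chunk has an empty `choices` list and a `usage` object;
# earlier chunks carry usage as null.
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Write a one-line haiku."}],
    stream=True,
    stream_options={"include_usage": True},
)

for chunk in stream:
    if chunk.choices:
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)
    if chunk.usage:  # only populated on the final chunk
        print("\ntotal tokens:", chunk.usage.total_tokens)
```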
- x-oaiMeta: - name: The chat completion chunk object - group: chat - example: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - CreateChatCompletionImageResponse: - type: object - description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - CreateImageRequest: - required: - - prompt - type: object - properties: - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. - example: A cute baby sea otter - model: - anyOf: - - type: string - - enum: - - dall-e-2 - - dall-e-3 - type: string - description: The model to use for image generation. - default: dall-e-2 - nullable: true - example: dall-e-3 - x-oaiTypeLabel: string - n: - maximum: 10 - minimum: 1 - type: integer - description: 'The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.' - default: 1 - nullable: true - example: 1 - quality: - enum: - - standard - - hd - type: string - description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - default: standard - example: standard - response_format: - enum: - - url - - b64_json - type: string - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - default: url - nullable: true - example: url - size: - enum: - - 256x256 - - 512x512 - - 1024x1024 - - 1792x1024 - - 1024x1792 - type: string - description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.' - default: 1024x1024 - nullable: true - example: 1024x1024 - style: - enum: - - vivid - - natural - type: string - description: 'The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. 
Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`.' - default: vivid - nullable: true - example: vivid - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - ImagesResponse: - required: - - created - - data - properties: - created: - type: integer - data: - type: array - items: - $ref: '#/components/schemas/Image' - Image: - type: object - properties: - b64_json: - type: string - description: 'The base64-encoded JSON of the generated image, if `response_format` is `b64_json`.' - url: - type: string - description: 'The URL of the generated image, if `response_format` is `url` (default).' - revised_prompt: - type: string - description: 'The prompt that was used to generate the image, if there was any revision to the prompt.' - description: Represents the url or the content of an image generated by the OpenAI API. - x-oaiMeta: - name: The image object - example: "{\n \"url\": \"...\",\n \"revised_prompt\": \"...\"\n}\n" - CreateImageEditRequest: - required: - - prompt - - image - type: object - properties: - image: - type: string - description: 'The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.' - format: binary - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters. - example: A cute baby sea otter wearing a beret - mask: - type: string - description: 'An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.' - format: binary - model: - anyOf: - - type: string - - enum: - - dall-e-2 - type: string - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - default: dall-e-2 - nullable: true - example: dall-e-2 - x-oaiTypeLabel: string - n: - maximum: 10 - minimum: 1 - type: integer - description: The number of images to generate. Must be between 1 and 10. - default: 1 - nullable: true - example: 1 - size: - enum: - - 256x256 - - 512x512 - - 1024x1024 - type: string - description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.' - default: 1024x1024 - nullable: true - example: 1024x1024 - response_format: - enum: - - url - - b64_json - type: string - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - default: url - nullable: true - example: url - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - CreateImageVariationRequest: - required: - - image - type: object - properties: - image: - type: string - description: 'The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.' - format: binary - model: - anyOf: - - type: string - - enum: - - dall-e-2 - type: string - description: The model to use for image generation. Only `dall-e-2` is supported at this time. 
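For the image-generation parameters above, a minimal sketch with the Python SDK; the prompt is arbitrary, and the `hd` quality and `style` settings only apply to `dall-e-3`:

```python
# Sketch only: generating a single dall-e-3 image; quality/style/size values
# come from the enums documented above.
from openai import OpenAI

client = OpenAI()

result = client.images.generate(
    model="dall-e-3",
    prompt="A cute baby sea otter wearing a beret",
    n=1,                    # dall-e-3 only supports n=1
    size="1024x1792",
    quality="hd",
    style="natural",
    response_format="url",  # URLs expire roughly 60 minutes after generation
)
image = result.data[0]
print(image.url, image.revised_prompt)
```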
- default: dall-e-2 - nullable: true - example: dall-e-2 - x-oaiTypeLabel: string - n: - maximum: 10 - minimum: 1 - type: integer - description: 'The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.' - default: 1 - nullable: true - example: 1 - response_format: - enum: - - url - - b64_json - type: string - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - default: url - nullable: true - example: url - size: - enum: - - 256x256 - - 512x512 - - 1024x1024 - type: string - description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.' - default: 1024x1024 - nullable: true - example: 1024x1024 - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - CreateModerationRequest: - required: - - input - type: object - properties: - input: - oneOf: - - type: string - default: '' - example: I want to kill them. - - type: array - items: - type: string - default: '' - example: I want to kill them. - description: The input text to classify - model: - anyOf: - - type: string - - enum: - - text-moderation-latest - - text-moderation-stable - type: string - description: "Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.\n\nThe default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.\n" - default: text-moderation-latest - example: text-moderation-stable - x-oaiTypeLabel: string - CreateModerationResponse: - required: - - id - - model - - results - type: object - properties: - id: - type: string - description: The unique identifier for the moderation request. - model: - type: string - description: The model used to generate the moderation results. - results: - type: array - items: - required: - - flagged - - categories - - category_scores - type: object - properties: - flagged: - type: boolean - description: Whether any of the below categories are flagged. - categories: - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - type: object - properties: - hate: - type: boolean - description: 'Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment.' - hate/threatening: - type: boolean - description: 'Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.' - harassment: - type: boolean - description: 'Content that expresses, incites, or promotes harassing language towards any target.' - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. 
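For `CreateImageEditRequest` and `CreateImageVariationRequest`, the source image (and optional mask) are uploaded as binary PNG files and only `dall-e-2` is accepted. A sketch with the Python SDK; `otter.png` and `mask.png` are placeholder file names:

```python
from openai import OpenAI

client = OpenAI()

# Edit: image must be a square PNG < 4MB; with no mask, its alpha channel is used as the mask.
edit = client.images.edit(
    model="dall-e-2",
    image=open("otter.png", "rb"),
    mask=open("mask.png", "rb"),
    prompt="A cute baby sea otter wearing a beret",
    n=1,
    size="1024x1024",
)

# Variation: same constraints on the source PNG; only dall-e-2 is supported.
variation = client.images.create_variation(
    model="dall-e-2",
    image=open("otter.png", "rb"),
    n=2,
    size="512x512",
)

print(edit.data[0].url)
print([img.url for img in variation.data])
```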
- self-harm: - type: boolean - description: 'Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.' - self-harm/intent: - type: boolean - description: 'Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.' - self-harm/instructions: - type: boolean - description: 'Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.' - sexual: - type: boolean - description: 'Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).' - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: 'Content that depicts death, violence, or physical injury.' - violence/graphic: - type: boolean - description: 'Content that depicts death, violence, or physical injury in graphic detail.' - description: 'A list of the categories, and whether they are flagged or not.' - category_scores: - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - type: object - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. - harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. - violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. - description: A list of the categories along with their scores as predicted by model. - description: A list of moderation objects. - description: Represents if a given text input is potentially harmful. 
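A short sketch of how the `CreateModerationRequest`/`CreateModerationResponse` pair defined above is typically exercised with the Python SDK; the flagged input is the schema's own example string:

```python
from openai import OpenAI

client = OpenAI()

mod = client.moderations.create(
    model="text-moderation-latest",
    input="I want to kill them.",
)

result = mod.results[0]
print(result.flagged)                   # True if any category is flagged
print(result.categories.violence)       # boolean flag per category
print(result.category_scores.violence)  # model score per category
```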
- x-oaiMeta: - name: The moderation object - example: "{\n \"id\": \"modr-XXXXX\",\n \"model\": \"text-moderation-005\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"sexual\": false,\n \"hate\": false,\n \"harassment\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"hate/threatening\": false,\n \"violence/graphic\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"harassment/threatening\": true,\n \"violence\": true,\n },\n \"category_scores\": {\n \"sexual\": 1.2282071e-06,\n \"hate\": 0.010696256,\n \"harassment\": 0.29842457,\n \"self-harm\": 1.5236925e-08,\n \"sexual/minors\": 5.7246268e-08,\n \"hate/threatening\": 0.0060676364,\n \"violence/graphic\": 4.435014e-06,\n \"self-harm/intent\": 8.098441e-10,\n \"self-harm/instructions\": 2.8498655e-11,\n \"harassment/threatening\": 0.63055265,\n \"violence\": 0.99011886,\n }\n }\n ]\n}\n" - ListFilesResponse: - required: - - object - - data - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - object: - enum: - - list - type: string - CreateFileRequest: - required: - - file - - purpose - type: object - properties: - file: - type: string - description: "The File object (not file name) to be uploaded.\n" - format: binary - purpose: - enum: - - assistants - - batch - - fine-tune - - vision - type: string - description: "The intended purpose of the uploaded file.\n\nUse \"assistants\" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, \"vision\" for Assistants image file inputs, \"batch\" for [Batch API](/docs/guides/batch), and \"fine-tune\" for [Fine-tuning](/docs/api-reference/fine-tuning).\n" - additionalProperties: false - DeleteFileResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - object: - enum: - - file - type: string - deleted: - type: boolean - CreateUploadRequest: - required: - - filename - - purpose - - bytes - - mime_type - type: object - properties: - filename: - type: string - description: "The name of the file to upload.\n" - purpose: - enum: - - assistants - - batch - - fine-tune - - vision - type: string - description: "The intended purpose of the uploaded file.\n\nSee the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).\n" - bytes: - type: integer - description: "The number of bytes in the file you are uploading.\n" - mime_type: - type: string - description: "The MIME type of the file.\n\nThis must fall within the supported MIME types for your file purpose. 
See the supported MIME types for assistants and vision.\n" - additionalProperties: false - AddUploadPartRequest: - required: - - data - type: object - properties: - data: - type: string - description: "The chunk of bytes for this Part.\n" - format: binary - additionalProperties: false - CompleteUploadRequest: - required: - - part_ids - type: object - properties: - part_ids: - type: array - items: - type: string - description: "The ordered list of Part IDs.\n" - md5: - type: string - description: "The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect.\n" - additionalProperties: false - CancelUploadRequest: - type: object - additionalProperties: false - CreateFineTuningJobRequest: - required: - - model - - training_file - type: object - properties: - model: - anyOf: - - type: string - - enum: - - babbage-002 - - davinci-002 - - gpt-3.5-turbo - - gpt-4o-mini - type: string - description: "The name of the model to fine-tune. You can select one of the\n[supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).\n" - example: gpt-4o-mini - x-oaiTypeLabel: string - training_file: - type: string - description: "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" - example: file-abc123 - hyperparameters: - type: object - properties: - batch_size: - oneOf: - - enum: - - auto - type: string - - maximum: 256 - minimum: 1 - type: integer - description: "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.\n" - default: auto - learning_rate_multiplier: - oneOf: - - enum: - - auto - type: string - - minimum: 0 - exclusiveMinimum: true - type: number - description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.\n" - default: auto - n_epochs: - oneOf: - - enum: - - auto - type: string - - maximum: 50 - minimum: 1 - type: integer - description: "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.\n" - default: auto - description: The hyperparameters used for the fine-tuning job. - suffix: - maxLength: 40 - minLength: 1 - type: string - description: "A string of up to 18 characters that will be added to your fine-tuned model name.\n\nFor example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.\n" - default: - nullable: true - validation_file: - type: string - description: "The ID of an uploaded file that contains validation data.\n\nIf you provide this file, the data is used to generate validation\nmetrics periodically during fine-tuning. These metrics can be viewed in\nthe fine-tuning results file.\nThe same data should not be present in both train and validation files.\n\nYour dataset must be formatted as a JSONL file. 
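The three Upload schemas above (`CreateUploadRequest`, `AddUploadPartRequest`, `CompleteUploadRequest`) describe a create → add parts → complete flow. A sketch of that flow with the Python SDK; the local path, chunk size, and the `text/jsonl` MIME type are assumptions, and the `client.uploads.*` helper names assume a recent SDK release:

```python
import os
from openai import OpenAI

client = OpenAI()
path = "training_examples.jsonl"  # hypothetical local file

# 1) Create the Upload with the declared size, purpose, and MIME type.
upload = client.uploads.create(
    filename=os.path.basename(path),
    purpose="fine-tune",
    bytes=os.path.getsize(path),
    mime_type="text/jsonl",
)

# 2) Add Parts (chunks of bytes); their order is fixed later via part_ids.
part_ids = []
with open(path, "rb") as f:
    while chunk := f.read(64 * 1024 * 1024):  # 64 MB chunks
        part = client.uploads.parts.create(upload_id=upload.id, data=chunk)
        part_ids.append(part.id)

# 3) Complete the Upload with the ordered list of Part IDs.
completed = client.uploads.complete(upload_id=upload.id, part_ids=part_ids)
print(completed.status, completed.file.id)
```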
You must upload your file with the purpose `fine-tune`.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" - nullable: true - example: file-abc123 - integrations: - type: array - items: - required: - - type - - wandb - type: object - properties: - type: - oneOf: - - enum: - - wandb - type: string - description: "The type of integration to enable. Currently, only \"wandb\" (Weights and Biases) is supported.\n" - wandb: - required: - - project - type: object - properties: - project: - type: string - description: "The name of the project that the new run will be created under.\n" - example: my-wandb-project - name: - type: string - description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" - nullable: true - entity: - type: string - description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" - nullable: true - tags: - type: array - items: - type: string - example: custom-tag - description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" - description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" - description: A list of integrations to enable for your fine-tuning job. - nullable: true - seed: - maximum: 2147483647 - minimum: 0 - type: integer - description: "The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\nIf a seed is not specified, one will be generated for you.\n" - nullable: true - example: 42 - ListFineTuningJobEventsResponse: - required: - - object - - data - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobEvent' - object: - enum: - - list - type: string - ListFineTuningJobCheckpointsResponse: - required: - - object - - data - - has_more - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobCheckpoint' - object: - enum: - - list - type: string - first_id: - type: string - nullable: true - last_id: - type: string - nullable: true - has_more: - type: boolean - CreateEmbeddingRequest: - required: - - model - - input - type: object - properties: - input: - oneOf: - - title: string - type: string - description: The string that will be turned into an embedding. - default: '' - example: This is a test. - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - type: string - default: '' - example: '[''This is a test.'']' - description: The array of strings that will be turned into an embedding. - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - type: integer - description: The array of integers that will be turned into an embedding. - example: '[1212, 318, 257, 1332, 13]' - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - minItems: 1 - type: array - items: - type: integer - description: The array of arrays containing integers that will be turned into an embedding. 
- example: '[[1212, 318, 257, 1332, 13]]' - description: "Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - example: The quick brown fox jumped over the lazy dog - x-oaiExpandable: true - model: - anyOf: - - type: string - - enum: - - text-embedding-ada-002 - - text-embedding-3-small - - text-embedding-3-large - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - example: text-embedding-3-small - x-oaiTypeLabel: string - encoding_format: - enum: - - float - - base64 - type: string - description: 'The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).' - default: float - example: float - dimensions: - minimum: 1 - type: integer - description: "The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.\n" - nullable: true - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - additionalProperties: false - CreateEmbeddingResponse: - required: - - object - - model - - data - - usage - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Embedding' - description: The list of embeddings generated by the model. - model: - type: string - description: The name of the model used to generate the embedding. - object: - enum: - - list - type: string - description: 'The object type, which is always "list".' - usage: - required: - - prompt_tokens - - total_tokens - type: object - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - description: The usage information for the request. - CreateTranscriptionRequest: - required: - - file - - model - type: object - properties: - file: - type: string - description: "The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n" - format: binary - x-oaiTypeLabel: file - model: - anyOf: - - type: string - - enum: - - whisper-1 - type: string - description: "ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.\n" - example: whisper-1 - x-oaiTypeLabel: string - language: - type: string - description: "The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.\n" - prompt: - type: string - description: "An optional text to guide the model's style or continue a previous audio segment. 
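A minimal sketch of `CreateEmbeddingRequest` with the Python SDK, showing the array-of-strings input form plus the `encoding_format` and `dimensions` options described above (the `dimensions` value is arbitrary):

```python
from openai import OpenAI

client = OpenAI()

resp = client.embeddings.create(
    model="text-embedding-3-small",
    input=["The quick brown fox jumped over the lazy dog", "This is a test."],
    encoding_format="float",
    dimensions=256,  # only supported on text-embedding-3 and later models
)

print(resp.model, resp.usage.prompt_tokens, resp.usage.total_tokens)
print(len(resp.data), len(resp.data[0].embedding))  # one vector per input
```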
The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.\n" - response_format: - enum: - - json - - text - - srt - - verbose_json - - vtt - type: string - description: "The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" - default: json - temperature: - type: number - description: "The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.\n" - default: 0 - 'timestamp_granularities[]': - type: array - items: - enum: - - word - - segment - type: string - description: "The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.\n" - default: - - segment - additionalProperties: false - CreateTranscriptionResponseJson: - required: - - text - type: object - properties: - text: - type: string - description: The transcribed text. - description: 'Represents a transcription response returned by model, based on the provided input.' - x-oaiMeta: - name: The transcription object (JSON) - group: audio - example: "{\n \"text\": \"Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that.\"\n}\n" - TranscriptionSegment: - required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob - type: object - properties: - id: - type: integer - description: Unique identifier of the segment. - seek: - type: integer - description: Seek offset of the segment. - start: - type: number - description: Start time of the segment in seconds. - format: float - end: - type: number - description: End time of the segment in seconds. - format: float - text: - type: string - description: Text content of the segment. - tokens: - type: array - items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - description: Temperature parameter used for generating the segment. - format: float - avg_logprob: - type: number - description: 'Average logprob of the segment. If the value is lower than -1, consider the logprobs failed.' - format: float - compression_ratio: - type: number - description: 'Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed.' - format: float - no_speech_prob: - type: number - description: 'Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent.' - format: float - TranscriptionWord: - required: - - word - - start - - end - type: object - properties: - word: - type: string - description: The text content of the word. - start: - type: number - description: Start time of the word in seconds. - format: float - end: - type: number - description: End time of the word in seconds. 
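Putting `CreateTranscriptionRequest` together: `timestamp_granularities[]` only applies when `response_format` is `verbose_json`. A sketch with the Python SDK; `speech.mp3` is a placeholder file, and attribute access to `words`/`segments` on the verbose response assumes a recent SDK version:

```python
from openai import OpenAI

client = OpenAI()

with open("speech.mp3", "rb") as audio:  # hypothetical local file
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio,
        language="en",
        response_format="verbose_json",  # required for timestamp granularities
        timestamp_granularities=["word", "segment"],
        temperature=0,
    )

print(transcript.text)
print(transcript.words[0])     # word-level timestamps
print(transcript.segments[0])  # segment details (avg_logprob, no_speech_prob, ...)
```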
- format: float - CreateTranscriptionResponseVerboseJson: - required: - - language - - duration - - text - type: object - properties: - language: - type: string - description: The language of the input audio. - duration: - type: string - description: The duration of the input audio. - text: - type: string - description: The transcribed text. - words: - type: array - items: - $ref: '#/components/schemas/TranscriptionWord' - description: Extracted words and their corresponding timestamps. - segments: - type: array - items: - $ref: '#/components/schemas/TranscriptionSegment' - description: Segments of the transcribed text and their corresponding details. - description: 'Represents a verbose json transcription response returned by model, based on the provided input.' - x-oaiMeta: - name: The transcription object (Verbose JSON) - group: audio - example: "{\n \"task\": \"transcribe\",\n \"language\": \"english\",\n \"duration\": 8.470000267028809,\n \"text\": \"The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.\",\n \"segments\": [\n {\n \"id\": 0,\n \"seek\": 0,\n \"start\": 0.0,\n \"end\": 3.319999933242798,\n \"text\": \" The beach was a popular spot on a hot summer day.\",\n \"tokens\": [\n 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530\n ],\n \"temperature\": 0.0,\n \"avg_logprob\": -0.2860786020755768,\n \"compression_ratio\": 1.2363636493682861,\n \"no_speech_prob\": 0.00985979475080967\n },\n ...\n ]\n}\n" - CreateTranslationRequest: - required: - - file - - model - type: object - properties: - file: - type: string - description: "The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n" - format: binary - x-oaiTypeLabel: file - model: - anyOf: - - type: string - - enum: - - whisper-1 - type: string - description: "ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.\n" - example: whisper-1 - x-oaiTypeLabel: string - prompt: - type: string - description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.\n" - response_format: - type: string - description: "The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" - default: json - temperature: - type: number - description: "The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.\n" - default: 0 - additionalProperties: false - CreateTranslationResponseJson: - required: - - text - type: object - properties: - text: - type: string - CreateTranslationResponseVerboseJson: - required: - - language - - duration - - text - type: object - properties: - language: - type: string - description: The language of the output translation (always `english`). - duration: - type: string - description: The duration of the input audio. - text: - type: string - description: The translated text. - segments: - type: array - items: - $ref: '#/components/schemas/TranscriptionSegment' - description: Segments of the translated text and their corresponding details. 
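`CreateTranslationRequest` mirrors the transcription request but always produces English text. A minimal Python SDK sketch; `german_speech.mp3` is a placeholder file name:

```python
from openai import OpenAI

client = OpenAI()

with open("german_speech.mp3", "rb") as audio:  # hypothetical local file
    translation = client.audio.translations.create(
        model="whisper-1",
        file=audio,
        prompt="Translate the lecture faithfully.",  # the prompt should be in English
        response_format="json",
        temperature=0,
    )

print(translation.text)  # English translation of the input audio
```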
- CreateSpeechRequest: - required: - - model - - input - - voice - type: object - properties: - model: - anyOf: - - type: string - - enum: - - tts-1 - - tts-1-hd - type: string - description: "One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`\n" - x-oaiTypeLabel: string - input: - maxLength: 4096 - type: string - description: The text to generate audio for. The maximum length is 4096 characters. - voice: - enum: - - alloy - - echo - - fable - - onyx - - nova - - shimmer - type: string - description: 'The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options).' - response_format: - enum: - - mp3 - - opus - - aac - - flac - - wav - - pcm - type: string - description: 'The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.' - default: mp3 - speed: - maximum: 4.0 - minimum: 0.25 - type: number - description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. - default: 1 - additionalProperties: false - Model: - title: Model - required: - - id - - object - - created - - owned_by - properties: - id: - type: string - description: 'The model identifier, which can be referenced in the API endpoints.' - created: - type: integer - description: The Unix timestamp (in seconds) when the model was created. - object: - enum: - - model - type: string - description: 'The object type, which is always "model".' - owned_by: - type: string - description: The organization that owns the model. - description: Describes an OpenAI model offering that can be used with the API. - x-oaiMeta: - name: The model object - example: "{\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" - OpenAIFile: - title: OpenAIFile - required: - - id - - object - - bytes - - created_at - - filename - - purpose - - status - properties: - id: - type: string - description: 'The file identifier, which can be referenced in the API endpoints.' - bytes: - type: integer - description: 'The size of the file, in bytes.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. - object: - enum: - - file - type: string - description: 'The object type, which is always `file`.' - purpose: - enum: - - assistants - - assistants_output - - batch - - batch_output - - fine-tune - - fine-tune-results - - vision - type: string - description: 'The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`.' - status: - enum: - - uploaded - - processed - - error - type: string - description: 'Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`.' - deprecated: true - status_details: - type: string - description: 'Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.' - deprecated: true - description: The `File` object represents a document that has been uploaded to OpenAI. 
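A sketch of `CreateSpeechRequest` with the Python SDK. The endpoint returns raw audio bytes rather than JSON; the `stream_to_file` helper used to persist them is an assumption about the SDK's binary-response wrapper:

```python
from openai import OpenAI

client = OpenAI()

speech = client.audio.speech.create(
    model="tts-1",
    voice="alloy",
    input="The quick brown fox jumped over the lazy dog.",
    response_format="mp3",
    speed=1.0,
)

# Persist the binary audio payload to disk.
speech.stream_to_file("speech.mp3")
```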
- x-oaiMeta: - name: The file object - example: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\"\n}\n" - Upload: - title: Upload - required: - - bytes - - created_at - - expires_at - - filename - - id - - purpose - - status - - step_number - type: object - properties: - id: - type: string - description: 'The Upload unique identifier, which can be referenced in API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - filename: - type: string - description: The name of the file to be uploaded. - bytes: - type: integer - description: The intended number of bytes to be uploaded. - purpose: - type: string - description: 'The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values.' - status: - enum: - - pending - - completed - - cancelled - - expired - type: string - description: The status of the Upload. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload will expire. - object: - enum: - - upload - type: string - description: 'The object type, which is always "upload".' - file: - $ref: '#/components/schemas/OpenAIFile' - description: "The Upload object can accept byte chunks in the form of Parts.\n" - x-oaiMeta: - name: The upload object - example: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\"\n }\n}\n" - UploadPart: - title: UploadPart - required: - - created_at - - id - - object - - upload_id - type: object - properties: - id: - type: string - description: 'The upload Part unique identifier, which can be referenced in API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Part was created. - upload_id: - type: string - description: The ID of the Upload object that this Part was added to. - object: - enum: - - upload.part - type: string - description: 'The object type, which is always `upload.part`.' - description: "The upload Part represents a chunk of bytes we can add to an Upload object.\n" - x-oaiMeta: - name: The upload part object - example: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719186911,\n \"upload_id\": \"upload_abc123\"\n}\n" - Embedding: - required: - - index - - object - - embedding - type: object - properties: - index: - type: integer - description: The index of the embedding in the list of embeddings. - embedding: - type: array - items: - type: number - description: "The embedding vector, which is a list of floats. The length of the vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings).\n" - object: - enum: - - embedding - type: string - description: 'The object type, which is always "embedding".' - description: "Represents an embedding vector returned by the embedding endpoint.\n" - x-oaiMeta: - name: The embedding object - example: "{\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... 
(1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n}\n" - FineTuningJob: - title: FineTuningJob - required: - - created_at - - error - - finished_at - - fine_tuned_model - - hyperparameters - - id - - model - - object - - organization_id - - result_files - - status - - trained_tokens - - training_file - - validation_file - - seed - type: object - properties: - id: - type: string - description: 'The object identifier, which can be referenced in the API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - error: - required: - - code - - message - - param - type: object - properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - param: - type: string - description: 'The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific.' - nullable: true - description: 'For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure.' - nullable: true - fine_tuned_model: - type: string - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - nullable: true - finished_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - nullable: true - hyperparameters: - required: - - n_epochs - type: object - properties: - n_epochs: - oneOf: - - enum: - - auto - type: string - - maximum: 50 - minimum: 1 - type: integer - description: "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n\"auto\" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs." - default: auto - description: 'The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.' - model: - type: string - description: The base model that is being fine-tuned. - object: - enum: - - fine_tuning.job - type: string - description: 'The object type, which is always "fine_tuning.job".' - organization_id: - type: string - description: The organization that owns the fine-tuning job. - result_files: - type: array - items: - type: string - example: file-abc123 - description: 'The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents).' - status: - enum: - - validating_files - - queued - - running - - succeeded - - failed - - cancelled - type: string - description: 'The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.' - trained_tokens: - type: integer - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - nullable: true - training_file: - type: string - description: 'The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents).' - validation_file: - type: string - description: 'The file ID used for validation. 
You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents).' - nullable: true - integrations: - maxItems: 5 - type: array - items: - oneOf: - - $ref: '#/components/schemas/FineTuningIntegration' - x-oaiExpandable: true - description: A list of integrations to enable for this fine-tuning job. - nullable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. - nullable: true - description: "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.\n" - x-oaiMeta: - name: The fine-tuning job object - example: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" - FineTuningIntegration: - title: Fine-Tuning Job Integration - required: - - type - - wandb - type: object - properties: - type: - enum: - - wandb - type: string - description: The type of the integration being enabled for the fine-tuning job - wandb: - required: - - project - type: object - properties: - project: - type: string - description: "The name of the project that the new run will be created under.\n" - example: my-wandb-project - name: - type: string - description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" - nullable: true - entity: - type: string - description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" - nullable: true - tags: - type: array - items: - type: string - example: custom-tag - description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" - description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. 
Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" - FineTuningJobEvent: - required: - - id - - object - - created_at - - level - - message - type: object - properties: - id: - type: string - created_at: - type: integer - level: - enum: - - info - - warn - - error - type: string - message: - type: string - object: - enum: - - fine_tuning.job.event - type: string - description: Fine-tuning job event object - x-oaiMeta: - name: The fine-tuning job event object - example: "{\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ftevent-abc123\"\n \"created_at\": 1677610602,\n \"level\": \"info\",\n \"message\": \"Created fine-tuning job\"\n}\n" - FineTuningJobCheckpoint: - title: FineTuningJobCheckpoint - required: - - created_at - - fine_tuning_job_id - - fine_tuned_model_checkpoint - - id - - metrics - - object - - step_number - type: object - properties: - id: - type: string - description: 'The checkpoint identifier, which can be referenced in the API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the checkpoint was created. - fine_tuned_model_checkpoint: - type: string - description: The name of the fine-tuned checkpoint model that is created. - step_number: - type: integer - description: The step number that the checkpoint was created at. - metrics: - type: object - properties: - step: - type: number - train_loss: - type: number - train_mean_token_accuracy: - type: number - valid_loss: - type: number - valid_mean_token_accuracy: - type: number - full_valid_loss: - type: number - full_valid_mean_token_accuracy: - type: number - description: Metrics at the step number during the fine-tuning job. - fine_tuning_job_id: - type: string - description: The name of the fine-tuning job that this checkpoint was created from. - object: - enum: - - fine_tuning.job.checkpoint - type: string - description: 'The object type, which is always "fine_tuning.job.checkpoint".' - description: "The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use.\n" - x-oaiMeta: - name: The fine-tuning job checkpoint object - example: "{\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P\",\n \"created_at\": 1712211699,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88\",\n \"fine_tuning_job_id\": \"ftjob-fpbNQ3H1GrMehXRf8cO97xTN\",\n \"metrics\": {\n \"step\": 88,\n \"train_loss\": 0.478,\n \"train_mean_token_accuracy\": 0.924,\n \"valid_loss\": 10.112,\n \"valid_mean_token_accuracy\": 0.145,\n \"full_valid_loss\": 0.567,\n \"full_valid_mean_token_accuracy\": 0.944\n },\n \"step_number\": 88\n}\n" - FinetuneChatRequestInput: - type: object - properties: - messages: - minItems: 1 - type: array - items: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/FineTuneChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: A list of tools the model may generate JSON inputs for. 
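End to end, a fine-tuning job ties several of the schemas above together: a `fine-tune` file upload, `CreateFineTuningJobRequest`, then the event and checkpoint list responses. A sketch with the Python SDK; `train.jsonl`, the suffix, and the seed are placeholders, and `jobs.checkpoints.list` assumes a recent SDK release:

```python
from openai import OpenAI

client = OpenAI()

# Upload the training data with purpose="fine-tune", then create the job.
training = client.files.create(file=open("train.jsonl", "rb"), purpose="fine-tune")

job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file=training.id,
    suffix="custom-model-name",
    seed=42,
    hyperparameters={"n_epochs": "auto"},
)

# Inspect the event stream and list checkpoints once the job has produced them.
for event in client.fine_tuning.jobs.list_events(fine_tuning_job_id=job.id, limit=10):
    print(event.level, event.message)

checkpoints = client.fine_tuning.jobs.checkpoints.list(fine_tuning_job_id=job.id)
for ckpt in checkpoints.data:
    print(ckpt.step_number, ckpt.metrics.full_valid_loss)
```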
- parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - functions: - maxItems: 128 - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: A list of functions the model may generate JSON inputs for. - deprecated: true - description: The per-line training example of a fine-tuning input file for chat models - x-oaiMeta: - name: Training format for chat models - example: "{\n \"messages\": [\n { \"role\": \"user\", \"content\": \"What is the weather in San Francisco?\" },\n {\n \"role\": \"assistant\",\n \"tool_calls\": [\n {\n \"id\": \"call_id\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\\"location\\\": \\\"San Francisco, USA\\\", \\\"format\\\": \\\"celsius\\\"}\"\n }\n }\n ]\n }\n ],\n \"parallel_tool_calls\": false,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and country, eg. San Francisco, USA\"\n },\n \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"] }\n },\n \"required\": [\"location\", \"format\"]\n }\n }\n }\n ]\n}\n" - FinetuneCompletionRequestInput: - type: object - properties: - prompt: - type: string - description: The input prompt for this training example. - completion: - type: string - description: The desired completion for this training example. - description: The per-line training example of a fine-tuning input file for completions models - x-oaiMeta: - name: Training format for completions models - example: "{\n \"prompt\": \"What is the answer to 2+2\",\n \"completion\": \"4\"\n}\n" - CompletionUsage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - description: Usage statistics for the completion request. - RunCompletionUsage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - description: 'Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.).' - nullable: true - RunStepCompletionUsage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. 
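The training file itself is plain JSONL, one `FinetuneChatRequestInput` object per line (or prompt/completion pairs for completions-format models). A small sketch that writes such a file with the standard library:

```python
import json

# One FinetuneChatRequestInput object per line of the JSONL training file.
examples = [
    {
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "Paris."},
        ]
    },
    # Completions-format models (babbage-002, davinci-002) use prompt/completion
    # pairs instead, in a separate file:
    # {"prompt": "What is the answer to 2+2", "completion": "4"},
]

with open("train.jsonl", "w", encoding="utf-8") as f:
    for example in examples:
        f.write(json.dumps(example) + "\n")
```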
- nullable: true - AssistantsApiResponseFormatOption: - oneOf: - - enum: - - auto - type: string - description: "`auto` is the default value\n" - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" - x-oaiExpandable: true - AssistantObject: - title: Assistant - required: - - id - - object - - created_at - - name - - description - - model - - instructions - - tools - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - assistant - type: string - description: 'The object type, which is always `assistant`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the assistant was created. - name: - maxLength: 256 - type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 - type: string - description: "The description of the assistant. The maximum length is 512 characters.\n" - nullable: true - model: - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - instructions: - maxLength: 256000 - type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. 
There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - description: Represents an `assistant` that can call the model and use tools. - x-oaiMeta: - name: The assistant object - beta: true - example: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - CreateAssistantRequest: - required: - - model - type: object - properties: - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - example: gpt-4o - x-oaiTypeLabel: string - name: - maxLength: 256 - type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 - type: string - description: "The description of the assistant. 
The maximum length is 512 characters.\n" - nullable: true - instructions: - maxLength: 256000 - type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - oneOf: - - required: - - vector_store_ids - - required: - - vector_stores - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - vector_stores: - maxItems: 1 - type: array - items: - type: object - properties: - file_ids: - maxItems: 10000 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" - chunking_strategy: - type: object - oneOf: - - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - additionalProperties: false - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - x-oaiTypeLabel: map - description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. 
The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ModifyAssistantRequest: - type: object - properties: - model: - anyOf: - - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - name: - maxLength: 256 - type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 - type: string - description: "The description of the assistant. The maximum length is 512 characters.\n" - nullable: true - instructions: - maxLength: 256000 - type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - DeleteAssistantResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - assistant.deleted - type: string - ListAssistantsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/AssistantObject' - first_id: - type: string - example: asst_abc123 - last_id: - type: string - example: asst_abc456 - has_more: - type: boolean - example: false - x-oaiMeta: - name: List assistants response object - group: chat - example: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" - AssistantToolsCode: - title: Code interpreter tool - required: - - type - type: object - properties: - type: - enum: - - code_interpreter - type: string - description: 'The type of tool being defined: `code_interpreter`' - AssistantToolsFileSearch: - title: FileSearch tool - required: - - type - type: object - properties: - type: - enum: - - file_search - type: string - description: 'The type of tool being defined: `file_search`' - file_search: - type: object - properties: - max_num_results: - maximum: 50 - minimum: 1 - type: integer - description: "The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.\n\nNote that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information.\n" - description: Overrides for the file search tool. 
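The assistant request schemas above (CreateAssistantRequest, ModifyAssistantRequest, and the assistant tool types) are easiest to read alongside a concrete call. A minimal, non-normative sketch using the official openai Python SDK's beta Assistants helpers; the model name, file ID, and vector store ID are placeholders:

from openai import OpenAI

client = OpenAI()

# CreateAssistantRequest: up to 128 tools, 20 code_interpreter files, 1 vector store.
assistant = client.beta.assistants.create(
    model="gpt-4o",  # placeholder model ID
    name="Data analyst",  # name is limited to 256 characters
    instructions="Answer questions using the attached files.",  # up to 256,000 characters
    tools=[{"type": "code_interpreter"}, {"type": "file_search"}],
    tool_resources={
        "code_interpreter": {"file_ids": ["file_abc123"]},  # placeholder file ID
        "file_search": {"vector_store_ids": ["vs_abc123"]},  # placeholder vector store ID
    },
)

# ModifyAssistantRequest: the same fields, sent via update.
assistant = client.beta.assistants.update(assistant.id, temperature=0.2)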
- AssistantToolsFileSearchTypeOnly: - title: FileSearch tool - required: - - type - type: object - properties: - type: - enum: - - file_search - type: string - description: 'The type of tool being defined: `file_search`' - AssistantToolsFunction: - title: Function tool - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of tool being defined: `function`' - function: - $ref: '#/components/schemas/FunctionObject' - TruncationObject: - title: Thread Truncation Controls - required: - - type - type: object - properties: - type: - enum: - - auto - - last_messages - type: string - description: 'The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.' - last_messages: - minimum: 1 - type: integer - description: The number of most recent messages from the thread when constructing the context for the run. - nullable: true - description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - AssistantsApiToolChoiceOption: - oneOf: - - enum: - - none - - auto - - required - type: string - description: "`none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user.\n" - - $ref: '#/components/schemas/AssistantsNamedToolChoice' - description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tools and instead generates a message.\n`auto` is the default value and means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools before responding to the user.\nSpecifying a particular tool like `{\"type\": \"file_search\"}` or `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n" - x-oaiExpandable: true - AssistantsNamedToolChoice: - required: - - type - type: object - properties: - type: - enum: - - function - - code_interpreter - - file_search - type: string - description: 'The type of the tool. If type is `function`, the function name must be set' - function: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: Specifies a tool the model should use. Use to force the model to call a specific tool. - RunObject: - title: A run on a thread - required: - - id - - object - - created_at - - thread_id - - assistant_id - - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at - - model - - instructions - - tools - - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - truncation_strategy - - tool_choice - - parallel_tool_calls - - response_format - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - thread.run - type: string - description: 'The object type, which is always `thread.run`.' 
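TruncationObject and AssistantsApiToolChoiceOption, defined just above, are passed on run creation as plain objects. A hedged sketch under the same SDK assumption, with placeholder IDs:

from openai import OpenAI

client = OpenAI()

# Keep only the 10 most recent messages in context and force the file_search tool.
run = client.beta.threads.runs.create(
    thread_id="thread_abc123",  # placeholder thread ID
    assistant_id="asst_abc123",  # placeholder assistant ID
    truncation_strategy={"type": "last_messages", "last_messages": 10},
    tool_choice={"type": "file_search"},  # or "none" / "auto" / "required"
)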
- created_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was created. - thread_id: - type: string - description: 'The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run.' - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run.' - status: - enum: - - queued - - in_progress - - requires_action - - cancelling - - cancelled - - failed - - completed - - incomplete - - expired - type: string - description: 'The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`.' - required_action: - required: - - type - - submit_tool_outputs - type: object - properties: - type: - enum: - - submit_tool_outputs - type: string - description: 'For now, this is always `submit_tool_outputs`.' - submit_tool_outputs: - required: - - tool_calls - type: object - properties: - tool_calls: - type: array - items: - $ref: '#/components/schemas/RunToolCallObject' - description: A list of the relevant tool calls. - description: Details on the tool outputs needed for this run to continue. - description: Details on the action required to continue the run. Will be `null` if no action is required. - nullable: true - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - rate_limit_exceeded - - invalid_prompt - type: string - description: 'One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.' - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the run will expire. - nullable: true - started_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was started. - nullable: true - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was cancelled. - nullable: true - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run failed. - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was completed. - nullable: true - incomplete_details: - type: object - properties: - reason: - enum: - - max_completion_tokens - - max_prompt_tokens - type: string - description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. - nullable: true - model: - type: string - description: 'The model that the [assistant](/docs/api-reference/assistants) used for this run.' - instructions: - type: string - description: 'The instructions that the [assistant](/docs/api-reference/assistants) used for this run.' - tools: - maxItems: 20 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: 'The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.' - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - usage: - $ref: '#/components/schemas/RunCompletionUsage' - temperature: - type: number - description: 'The sampling temperature used for this run. If not set, defaults to 1.' - nullable: true - top_p: - type: number - description: 'The nucleus sampling value used for this run. If not set, defaults to 1.' - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens specified to have been used over the course of the run.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens specified to have been used over the course of the run.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - description: 'Represents an execution run on a [thread](/docs/api-reference/threads).' - x-oaiMeta: - name: The run object - beta: true - example: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1698107661,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699073476,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699073498,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [{\"type\": \"file_search\"}, {\"type\": \"code_interpreter\"}],\n \"metadata\": {},\n \"incomplete_details\": null,\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - CreateRunRequest: - required: - - thread_id - - assistant_id - type: object - properties: - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' - nullable: true - example: gpt-4o - x-oaiTypeLabel: string - instructions: - type: string - description: 'Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. 
This is useful for modifying the behavior on a per-run basis.' - nullable: true - additional_instructions: - type: string - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. - nullable: true - additional_messages: - type: array - items: - $ref: '#/components/schemas/CreateMessageRequest' - description: Adds additional messages to the thread before creating the run. - nullable: true - tools: - maxItems: 20 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" - default: 1 - nullable: true - example: 1 - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ListRunsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/RunObject' - first_id: - type: string - example: run_abc123 - last_id: - type: string - example: run_abc456 - has_more: - type: boolean - example: false - ModifyRunRequest: - type: object - properties: - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format.
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - SubmitToolOutputsRunRequest: - required: - - tool_outputs - type: object - properties: - tool_outputs: - type: array - items: - type: object - properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - description: A list of tools for which the outputs are being submitted. - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - additionalProperties: false - RunToolCallObject: - required: - - id - - type - - function - type: object - properties: - id: - type: string - description: 'The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.' - type: - enum: - - function - type: string - description: 'The type of tool call the output is required for. For now, this is always `function`.' - function: - required: - - name - - arguments - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments that the model expects you to pass to the function. - description: The function definition. - description: Tool call objects - CreateThreadAndRunRequest: - required: - - thread_id - - assistant_id - type: object - properties: - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' - thread: - $ref: '#/components/schemas/CreateThreadRequest' - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' - nullable: true - example: gpt-4o - x-oaiTypeLabel: string - instructions: - type: string - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. - nullable: true - tools: - maxItems: 20 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
- nullable: true - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ThreadObject: - title: Thread - required: - - id - - object - - created_at - - tool_resources - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - thread - type: string - description: 'The object type, which is always `thread`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the thread was created. 
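CreateThreadAndRunRequest above combines thread creation with run execution, and SubmitToolOutputsRunRequest closes the loop when a run stops in `requires_action`. A rough, non-normative sketch of that round trip with the openai Python SDK; the IDs and the tool output value are placeholders:

from openai import OpenAI

client = OpenAI()

# Create a thread and start a run against it in one request.
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",  # placeholder assistant ID
    thread={"messages": [{"role": "user", "content": "What is the weather in Paris?"}]},
)

# In practice, poll until the run leaves queued/in_progress; if it pauses for
# tool calls, echo each tool_call_id back with its output.
run = client.beta.threads.runs.retrieve(run_id=run.id, thread_id=run.thread_id)
if run.status == "requires_action":
    calls = run.required_action.submit_tool_outputs.tool_calls
    run = client.beta.threads.runs.submit_tool_outputs(
        run_id=run.id,
        thread_id=run.thread_id,
        tool_outputs=[{"tool_call_id": c.id, "output": '{"temperature_c": 18}'} for c in calls],
    )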
- tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: 'Represents a thread that contains [messages](/docs/api-reference/messages).' - x-oaiMeta: - name: The thread object - beta: true - example: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1698107661,\n \"metadata\": {}\n}\n" - CreateThreadRequest: - type: object - properties: - messages: - type: array - items: - $ref: '#/components/schemas/CreateMessageRequest' - description: 'A list of [messages](/docs/api-reference/messages) to start the thread with.' - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - oneOf: - - required: - - vector_store_ids - - required: - - vector_stores - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - vector_stores: - maxItems: 1 - type: array - items: - type: object - properties: - file_ids: - maxItems: 10000 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" - chunking_strategy: - type: object - oneOf: - - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. 
The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - additionalProperties: false - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - x-oaiTypeLabel: map - x-oaiExpandable: true - description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ModifyThreadRequest: - type: object - properties: - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - DeleteThreadResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - thread.deleted - type: string - ListThreadsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/ThreadObject' - first_id: - type: string - example: asst_abc123 - last_id: - type: string - example: asst_abc456 - has_more: - type: boolean - example: false - MessageObject: - title: The message object - required: - - id - - object - - created_at - - thread_id - - status - - incomplete_details - - completed_at - - incomplete_at - - role - - content - - assistant_id - - run_id - - attachments - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - thread.message - type: string - description: 'The object type, which is always `thread.message`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was created. - thread_id: - type: string - description: 'The [thread](/docs/api-reference/threads) ID that this message belongs to.' - status: - enum: - - in_progress - - incomplete - - completed - type: string - description: 'The status of the message, which can be either `in_progress`, `incomplete`, or `completed`.' - incomplete_details: - required: - - reason - type: object - properties: - reason: - enum: - - content_filter - - max_tokens - - run_cancelled - - run_expired - - run_failed - type: string - description: The reason the message is incomplete. - description: 'On an incomplete message, details about why the message is incomplete.' - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was completed. - nullable: true - incomplete_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. - nullable: true - role: - enum: - - user - - assistant - type: string - description: The entity that produced the message. One of `user` or `assistant`. - content: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentImageUrlObject' - - $ref: '#/components/schemas/MessageContentTextObject' - - $ref: '#/components/schemas/MessageContentRefusalObject' - x-oaiExpandable: true - description: The content of the message in array of text and/or images. - assistant_id: - type: string - description: 'If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message.' - nullable: true - run_id: - type: string - description: 'The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints.' - nullable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. 
- tools: - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - x-oaiExpandable: true - description: The tools to add this file to. - description: 'A list of files attached to the message, and the tools they were added to.' - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: 'Represents a message within a [thread](/docs/api-reference/threads).' - x-oaiMeta: - name: The message object - beta: true - example: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1698983503,\n \"thread_id\": \"thread_abc123\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hi! How can I help you today?\",\n \"annotations\": []\n }\n }\n ],\n \"assistant_id\": \"asst_abc123\",\n \"run_id\": \"run_abc123\",\n \"attachments\": [],\n \"metadata\": {}\n}\n" - MessageDeltaObject: - title: Message delta object - required: - - id - - object - - delta - type: object - properties: - id: - type: string - description: 'The identifier of the message, which can be referenced in API endpoints.' - object: - enum: - - thread.message.delta - type: string - description: 'The object type, which is always `thread.message.delta`.' - delta: - type: object - properties: - role: - enum: - - user - - assistant - type: string - description: The entity that produced the message. One of `user` or `assistant`. - content: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageDeltaContentImageFileObject' - - $ref: '#/components/schemas/MessageDeltaContentTextObject' - - $ref: '#/components/schemas/MessageDeltaContentRefusalObject' - - $ref: '#/components/schemas/MessageDeltaContentImageUrlObject' - x-oaiExpandable: true - description: The content of the message in array of text and/or images. - description: The delta containing the fields that have changed on the Message. - description: "Represents a message delta i.e. any changed fields on a message during streaming.\n" - x-oaiMeta: - name: The message delta object - beta: true - example: "{\n \"id\": \"msg_123\",\n \"object\": \"thread.message.delta\",\n \"delta\": {\n \"content\": [\n {\n \"index\": 0,\n \"type\": \"text\",\n \"text\": { \"value\": \"Hello\", \"annotations\": [] }\n }\n ]\n }\n}\n" - CreateMessageRequest: - required: - - role - - content - type: object - properties: - role: - enum: - - user - - assistant - type: string - description: "The role of the entity that is creating the message. Allowed values include:\n- `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n- `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.\n" - content: - oneOf: - - title: Text content - type: string - description: The text contents of the message. 
- - title: Array of content parts - minItems: 1 - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentImageUrlObject' - - $ref: '#/components/schemas/MessageRequestContentTextObject' - x-oaiExpandable: true - description: 'An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview).' - x-oaiExpandable: true - attachments: - required: - - file_id - - tools - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - x-oaiExpandable: true - description: The tools to add this file to. - description: 'A list of files attached to the message, and the tools they should be added to.' - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ModifyMessageRequest: - type: object - properties: - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - DeleteMessageResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - thread.message.deleted - type: string - ListMessagesResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/MessageObject' - first_id: - type: string - example: msg_abc123 - last_id: - type: string - example: msg_abc123 - has_more: - type: boolean - example: false - MessageContentImageFileObject: - title: Image file - required: - - type - - image_file - type: object - properties: - type: - enum: - - image_file - type: string - description: Always `image_file`. - image_file: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: 'References an image [File](/docs/api-reference/files) in the content of a message.' - MessageDeltaContentImageFileObject: - title: Image file - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the content part in the message. - type: - enum: - - image_file - type: string - description: Always `image_file`. 
- image_file: - type: object - properties: - file_id: - type: string - description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: 'References an image [File](/docs/api-reference/files) in the content of a message.' - MessageContentImageUrlObject: - title: Image URL - required: - - type - - image_url - type: object - properties: - type: - enum: - - image_url - type: string - description: The type of the content part. - image_url: - required: - - url - type: object - properties: - url: - type: string - description: 'The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' - format: uri - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto`' - default: auto - description: References an image URL in the content of a message. - MessageDeltaContentImageUrlObject: - title: Image URL - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the content part in the message. - type: - enum: - - image_url - type: string - description: Always `image_url`. - image_url: - type: object - properties: - url: - type: string - description: 'The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: References an image URL in the content of a message. - MessageContentTextObject: - title: Text - required: - - type - - text - type: object - properties: - type: - enum: - - text - type: string - description: Always `text`. - text: - required: - - value - - annotations - type: object - properties: - value: - type: string - description: The data that makes up the text. - annotations: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - description: The text content that is part of a message. - MessageContentRefusalObject: - title: Refusal - required: - - type - - refusal - type: object - properties: - type: - enum: - - refusal - type: string - description: Always `refusal`. - refusal: - type: string - description: The refusal content generated by the assistant. - MessageRequestContentTextObject: - title: Text - required: - - type - - text - type: object - properties: - type: - enum: - - text - type: string - description: Always `text`. - text: - type: string - description: Text content to be sent to the model - description: The text content that is part of a message. - MessageContentTextAnnotationsFileCitationObject: - title: File citation - required: - - type - - text - - file_citation - - start_index - - end_index - type: object - properties: - type: - enum: - - file_citation - type: string - description: Always `file_citation`. 
- text: - type: string - description: The text in the message content that needs to be replaced. - file_citation: - required: - - file_id - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - MessageContentTextAnnotationsFilePathObject: - title: File path - required: - - type - - text - - file_path - - start_index - - end_index - type: object - properties: - type: - enum: - - file_path - type: string - description: Always `file_path`. - text: - type: string - description: The text in the message content that needs to be replaced. - file_path: - required: - - file_id - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - MessageDeltaContentTextObject: - title: Text - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the content part in the message. - type: - enum: - - text - type: string - description: Always `text`. - text: - type: object - properties: - value: - type: string - description: The data that makes up the text. - annotations: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - description: The text content that is part of a message. - MessageDeltaContentRefusalObject: - title: Refusal - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the refusal part in the message. - type: - enum: - - refusal - type: string - description: Always `refusal`. - refusal: - type: string - description: The refusal content that is part of a message. - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: - enum: - - file_citation - type: string - description: Always `file_citation`. - text: - type: string - description: The text in the message content that needs to be replaced. - file_citation: - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. - quote: - type: string - description: The specific quote in the file. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: - enum: - - file_path - type: string - description: Always `file_path`. 
- text: - type: string - description: The text in the message content that needs to be replaced. - file_path: - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - RunStepObject: - title: Run steps - required: - - id - - object - - created_at - - assistant_id - - thread_id - - run_id - - type - - status - - step_details - - last_error - - expired_at - - cancelled_at - - failed_at - - completed_at - - metadata - - usage - type: object - properties: - id: - type: string - description: 'The identifier of the run step, which can be referenced in API endpoints.' - object: - enum: - - thread.run.step - type: string - description: 'The object type, which is always `thread.run.step`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step was created. - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) associated with the run step.' - thread_id: - type: string - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' - run_id: - type: string - description: 'The ID of the [run](/docs/api-reference/runs) that this run step is a part of.' - type: - enum: - - message_creation - - tool_calls - type: string - description: 'The type of run step, which can be either `message_creation` or `tool_calls`.' - status: - enum: - - in_progress - - cancelled - - failed - - completed - - expired - type: string - description: 'The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.' - step_details: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' - description: The details of the run step. - x-oaiExpandable: true - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - rate_limit_exceeded - type: string - description: One of `server_error` or `rate_limit_exceeded`. - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this run step. Will be `null` if there are no errors. - nullable: true - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. - nullable: true - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step was cancelled. - nullable: true - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step failed. - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step completed. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - usage: - $ref: '#/components/schemas/RunStepCompletionUsage' - description: "Represents a step in execution of a run.\n" - x-oaiMeta: - name: The run step object - beta: true - example: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" - RunStepDeltaObject: - title: Run step delta object - required: - - id - - object - - delta - type: object - properties: - id: - type: string - description: 'The identifier of the run step, which can be referenced in API endpoints.' - object: - enum: - - thread.run.step.delta - type: string - description: 'The object type, which is always `thread.run.step.delta`.' - delta: - type: object - properties: - step_details: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsObject' - description: The details of the run step. - x-oaiExpandable: true - description: The delta containing the fields that have changed on the run step. - description: "Represents a run step delta i.e. any changed fields on a run step during streaming.\n" - x-oaiMeta: - name: The run step delta object - beta: true - example: "{\n \"id\": \"step_123\",\n \"object\": \"thread.run.step.delta\",\n \"delta\": {\n \"step_details\": {\n \"type\": \"tool_calls\",\n \"tool_calls\": [\n {\n \"index\": 0,\n \"id\": \"call_123\",\n \"type\": \"code_interpreter\",\n \"code_interpreter\": { \"input\": \"\", \"outputs\": [] }\n }\n ]\n }\n }\n}\n" - ListRunStepsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/RunStepObject' - first_id: - type: string - example: step_abc123 - last_id: - type: string - example: step_abc456 - has_more: - type: boolean - example: false - RunStepDetailsMessageCreationObject: - title: Message creation - required: - - type - - message_creation - type: object - properties: - type: - enum: - - message_creation - type: string - description: Always `message_creation`. - message_creation: - required: - - message_id - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - description: Details of the message creation by the run step. - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation - required: - - type - type: object - properties: - type: - enum: - - message_creation - type: string - description: Always `message_creation`. - message_creation: - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - description: Details of the message creation by the run step. 
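The run step objects above (RunStepObject, its streaming delta, and ListRunStepsResponse) can be inspected once a run finishes. A brief sketch, again assuming the openai Python SDK's beta helpers and placeholder IDs:

from openai import OpenAI

client = OpenAI()

# List the steps of a run and show what each one did.
steps = client.beta.threads.runs.steps.list(
    run_id="run_abc123",  # placeholder run ID
    thread_id="thread_abc123",  # placeholder thread ID
)
for step in steps.data:
    if step.step_details.type == "message_creation":
        print("created message:", step.step_details.message_creation.message_id)
    elif step.step_details.type == "tool_calls":
        print("tool calls:", [call.type for call in step.step_details.tool_calls])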
- RunStepDetailsToolCallsObject: - title: Tool calls - required: - - type - - tool_calls - type: object - properties: - type: - enum: - - tool_calls - type: string - description: Always `tool_calls`. - tool_calls: - type: array - items: - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" - description: Details of the tool call. - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls - required: - - type - type: object - properties: - type: - enum: - - tool_calls - type: string - description: Always `tool_calls`. - tool_calls: - type: array - items: - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" - description: Details of the tool call. - RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call - required: - - id - - type - - code_interpreter - type: object - properties: - id: - type: string - description: The ID of the tool call. - type: - enum: - - code_interpreter - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - code_interpreter: - required: - - input - - outputs - type: object - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - items: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - description: 'The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. - RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call. - type: - enum: - - code_interpreter - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - code_interpreter: - type: object - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - items: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - description: 'The outputs from the Code Interpreter tool call. 
Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output - required: - - type - - logs - type: object - properties: - type: - enum: - - logs - type: string - description: Always `logs`. - logs: - type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - enum: - - logs - type: string - description: Always `logs`. - logs: - type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. - RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output - required: - - type - - image - type: object - properties: - type: - enum: - - image - type: string - description: Always `image`. - image: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'The [file](/docs/api-reference/files) ID of the image.' - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - enum: - - image - type: string - description: Always `image`. - image: - type: object - properties: - file_id: - type: string - description: 'The [file](/docs/api-reference/files) ID of the image.' - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call - required: - - id - - type - - file_search - type: object - properties: - id: - type: string - description: The ID of the tool call object. - type: - enum: - - file_search - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - file_search: - type: object - description: 'For now, this is always going to be an empty object.' - x-oaiTypeLabel: map - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call - required: - - index - - type - - file_search - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - enum: - - file_search - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - file_search: - type: object - description: 'For now, this is always going to be an empty object.' - x-oaiTypeLabel: map - RunStepDetailsToolCallsFunctionObject: - title: Function tool call - required: - - id - - type - - function - type: object - properties: - id: - type: string - description: The ID of the tool call object. - type: - enum: - - function - type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. 
- function: - required: - - name - - arguments - - output - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' - nullable: true - description: The definition of the function that was called. - RunStepDeltaStepDetailsToolCallsFunctionObject: - title: Function tool call - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - enum: - - function - type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - function: - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' - nullable: true - description: The definition of the function that was called. - VectorStoreExpirationAfter: - title: Vector store expiration policy - required: - - anchor - - days - type: object - properties: - anchor: - enum: - - last_active_at - type: string - description: 'Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.' - days: - maximum: 365 - minimum: 1 - type: integer - description: The number of days after the anchor time that the vector store will expire. - description: The expiration policy for a vector store. - VectorStoreObject: - title: Vector store - required: - - id - - object - - usage_bytes - - created_at - - status - - last_active_at - - name - - file_counts - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store - type: string - description: 'The object type, which is always `vector_store`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store was created. - name: - type: string - description: The name of the vector store. - usage_bytes: - type: integer - description: The total number of bytes used by the files in the vector store. - file_counts: - required: - - in_progress - - completed - - failed - - cancelled - - total - type: object - properties: - in_progress: - type: integer - description: The number of files that are currently being processed. - completed: - type: integer - description: The number of files that have been successfully processed. - failed: - type: integer - description: The number of files that have failed to process. - cancelled: - type: integer - description: The number of files that were cancelled. - total: - type: integer - description: The total number of files. - status: - enum: - - expired - - in_progress - - completed - type: string - description: 'The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.' 
- expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store will expire. - nullable: true - last_active_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store was last active. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: A vector store is a collection of processed files can be used by the `file_search` tool. - x-oaiMeta: - name: The vector store object - beta: true - example: "{\n \"id\": \"vs_123\",\n \"object\": \"vector_store\",\n \"created_at\": 1698107661,\n \"usage_bytes\": 123456,\n \"last_active_at\": 1698107661,\n \"name\": \"my_vector_store\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"cancelled\": 0,\n \"failed\": 0,\n \"total\": 100\n },\n \"metadata\": {},\n \"last_used_at\": 1698107661\n}\n" - CreateVectorStoreRequest: - type: object - properties: - file_ids: - maxItems: 500 - type: array - items: - type: string - description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' - name: - type: string - description: The name of the vector store. - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - chunking_strategy: - type: object - oneOf: - - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' - - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - UpdateVectorStoreRequest: - type: object - properties: - name: - type: string - description: The name of the vector store. - nullable: true - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' + - url: https://api.openai.com/v1 +tags: + - name: Assistants + description: Build Assistants that can call models and use tools. + - name: Audio + description: Turn audio into text or text into audio. + - name: Chat + description: Given a list of messages comprising a conversation, the model will + return a response. + - name: Completions + description: Given a prompt, the model will return one or more predicted + completions, and can also return the probabilities of alternative tokens + at each position. + - name: Embeddings + description: Get a vector representation of a given input that can be easily + consumed by machine learning models and algorithms. + - name: Fine-tuning + description: Manage fine-tuning jobs to tailor a model to your specific training data. + - name: Batch + description: Create large batches of API requests to run asynchronously. 
+ - name: Files + description: Files are used to upload documents that can be used with features + like Assistants and Fine-tuning. + - name: Uploads + description: Use Uploads to upload large files in multiple parts. + - name: Images + description: Given a prompt and/or an input image, the model will generate a new image. + - name: Models + description: List and describe the various models available in the API. + - name: Moderations + description: Given text and/or image inputs, classifies if those inputs are + potentially harmful. + - name: Audit Logs + description: List user actions and configuration changes within this organization. +paths: + /assistants: + get: + operationId: listAssistants + tags: + - Assistants + summary: Returns a list of assistants. + parameters: + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListAssistantsResponse" + x-oaiMeta: + name: List assistants + group: assistants + beta: true + returns: A list of [assistant](/docs/api-reference/assistants/object) objects. 
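The `after`/`before` cursors described for this endpoint pair with the `last_id` and `has_more` fields of the list response. Below is a minimal pagination sketch over GET /v1/assistants using plain HTTP; the `requests` dependency and the `list_all_assistants` helper are assumptions for illustration, and the SDK calls shown in the examples accept the same parameters.

```python
import os
import requests

def list_all_assistants(limit: int = 20) -> list[dict]:
    """Walk GET /v1/assistants page by page using the `after` cursor."""
    headers = {
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
        "OpenAI-Beta": "assistants=v2",
    }
    assistants, after = [], None
    while True:
        params = {"limit": limit, "order": "desc"}
        if after:
            params["after"] = after
        page = requests.get(
            "https://api.openai.com/v1/assistants", headers=headers, params=params
        ).json()
        assistants.extend(page["data"])
        if not page.get("has_more"):
            return assistants
        after = page["last_id"]  # cursor for the next page
```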
+ examples: + request: + curl: | + curl "https://api.openai.com/v1/assistants?order=desc&limit=20" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_assistants = client.beta.assistants.list( + order="desc", + limit="20", + ) + print(my_assistants.data) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistants = await openai.beta.assistants.list({ + order: "desc", + limit: "20", + }); + + console.log(myAssistants.data); + } + + main(); + response: > + { + "object": "list", + "data": [ + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698982736, + "name": "Coding Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc456", + "object": "assistant", + "created_at": 1698982718, + "name": "My Assistant", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc789", + "object": "assistant", + "created_at": 1698982643, + "name": null, + "description": null, + "model": "gpt-4o", + "instructions": null, + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + ], + "first_id": "asst_abc123", + "last_id": "asst_abc789", + "has_more": false + } + post: + operationId: createAssistant + tags: + - Assistants + summary: Create an assistant with a model and instructions. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateAssistantRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Create assistant + group: assistants + beta: true + returns: An [assistant](/docs/api-reference/assistants/object) object. + examples: + - title: Code Interpreter + request: + curl: > + curl "https://api.openai.com/v1/assistants" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + "name": "Math Tutor", + "tools": [{"type": "code_interpreter"}], + "model": "gpt-4o" + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + my_assistant = client.beta.assistants.create( + instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + name="Math Tutor", + tools=[{"type": "code_interpreter"}], + model="gpt-4o", + ) + + print(my_assistant) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", + name: "Math Tutor", + tools: [{ type: "code_interpreter" }], + model: "gpt-4o", + }); + + console.log(myAssistant); + } + + + main(); + response: > + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698984975, + "name": "Math Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + - title: Files + request: + curl: > + curl https://api.openai.com/v1/assistants \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [{"type": "file_search"}], + "tool_resources": {"file_search": {"vector_store_ids": ["vs_123"]}}, + "model": "gpt-4o" + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + my_assistant = client.beta.assistants.create( + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", + name="HR Helper", + tools=[{"type": "file_search"}], + tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}}, + model="gpt-4o" + ) + + print(my_assistant) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies.", + name: "HR Helper", + tools: [{ type: "file_search" }], + tool_resources: { + file_search: { + vector_store_ids: ["vs_123"] + } + }, + model: "gpt-4o" + }); + + console.log(myAssistant); + } + + + main(); + response: > + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009403, + "name": "HR Helper", + "description": null, + "model": "gpt-4o", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": ["vs_123"] + } + }, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + /assistants/{assistant_id}: + get: + operationId: getAssistant + tags: + - Assistants + summary: Retrieves an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Retrieve assistant + group: assistants + beta: true + returns: The [assistant](/docs/api-reference/assistants/object) object matching + the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.retrieve("asst_abc123") + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.retrieve( + "asst_abc123" + ); + + console.log(myAssistant); + } + + main(); + response: > + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4o", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ + { + "type": "file_search" + } + ], + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + post: + operationId: modifyAssistant + tags: + - Assistants + summary: Modifies an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyAssistantRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Modify assistant + group: assistants + beta: true + returns: The modified [assistant](/docs/api-reference/assistants/object) object. + examples: + request: + curl: > + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + "tools": [{"type": "file_search"}], + "model": "gpt-4o" + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + my_updated_assistant = client.beta.assistants.update( + "asst_abc123", + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + name="HR Helper", + tools=[{"type": "file_search"}], + model="gpt-4o" + ) + + + print(my_updated_assistant) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const myUpdatedAssistant = await openai.beta.assistants.update( + "asst_abc123", + { + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + name: "HR Helper", + tools: [{ type: "file_search" }], + model: "gpt-4o" + } + ); + + console.log(myUpdatedAssistant); + } + + + main(); + response: > + { + "id": "asst_123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4o", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": [] + } + }, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + delete: + operationId: deleteAssistant + tags: + - Assistants + summary: Delete an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteAssistantResponse" + x-oaiMeta: + name: Delete assistant + group: assistants + beta: true + returns: Deletion status + examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X DELETE + python: | + from openai import OpenAI + client = OpenAI() + + response = client.beta.assistants.delete("asst_abc123") + print(response) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const response = await openai.beta.assistants.del("asst_abc123"); + + console.log(response); + } + + main(); + response: | + { + "id": "asst_abc123", + "object": "assistant.deleted", + "deleted": true + } + /audio/speech: + post: + operationId: createSpeech + tags: + - Audio + summary: Generates audio from the input text. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateSpeechRequest" + responses: + "200": + description: OK + headers: + Transfer-Encoding: + schema: + type: string + description: chunked + content: + application/octet-stream: + schema: + type: string + format: binary + x-oaiMeta: + name: Create speech + group: audio + returns: The audio file content. + examples: + request: + curl: | + curl https://api.openai.com/v1/audio/speech \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "model": "tts-1", + "input": "The quick brown fox jumped over the lazy dog.", + "voice": "alloy" + }' \ + --output speech.mp3 + python: | + from pathlib import Path + import openai + + speech_file_path = Path(__file__).parent / "speech.mp3" + response = openai.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + response.stream_to_file(speech_file_path) + node: > + import fs from "fs"; + + import path from "path"; + + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const speechFile = path.resolve("./speech.mp3"); + + + async function main() { + const mp3 = await openai.audio.speech.create({ + model: "tts-1", + voice: "alloy", + input: "Today is a wonderful day to build something people love!", + }); + console.log(speechFile); + const buffer = Buffer.from(await mp3.arrayBuffer()); + await fs.promises.writeFile(speechFile, buffer); + } + + main(); + /audio/transcriptions: + post: + operationId: createTranscription + tags: + - Audio + summary: Transcribes audio into the input language. 
+ requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranscriptionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/CreateTranscriptionResponseJson" + - $ref: "#/components/schemas/CreateTranscriptionResponseVerboseJson" + x-oaiMeta: + name: Create transcription + group: audio + returns: The [transcription object](/docs/api-reference/audio/json-object) or a + [verbose transcription + object](/docs/api-reference/audio/verbose-json-object). + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + node: > + import fs from "fs"; + + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + }); + + console.log(transcription.text); + } + + main(); + response: > + { + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." + } + - title: Word timestamps + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F "timestamp_granularities[]=word" \ + -F model="whisper-1" \ + -F response_format="verbose_json" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + file=audio_file, + model="whisper-1", + response_format="verbose_json", + timestamp_granularities=["word"] + ) + + print(transcript.words) + node: > + import fs from "fs"; + + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + response_format: "verbose_json", + timestamp_granularities: ["word"] + }); + + console.log(transcription.text); + } + + main(); + response: > + { + "task": "transcribe", + "language": "english", + "duration": 8.470000267028809, + "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.", + "words": [ + { + "word": "The", + "start": 0.0, + "end": 0.23999999463558197 + }, + ... 
+ { + "word": "volleyball", + "start": 7.400000095367432, + "end": 7.900000095367432 + } + ] + } + - title: Segment timestamps + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F "timestamp_granularities[]=segment" \ + -F model="whisper-1" \ + -F response_format="verbose_json" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + file=audio_file, + model="whisper-1", + response_format="verbose_json", + timestamp_granularities=["segment"] + ) + + print(transcript.words) + node: > + import fs from "fs"; + + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + response_format: "verbose_json", + timestamp_granularities: ["segment"] + }); + + console.log(transcription.text); + } + + main(); + response: > + { + "task": "transcribe", + "language": "english", + "duration": 8.470000267028809, + "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.", + "segments": [ + { + "id": 0, + "seek": 0, + "start": 0.0, + "end": 3.319999933242798, + "text": " The beach was a popular spot on a hot summer day.", + "tokens": [ + 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530 + ], + "temperature": 0.0, + "avg_logprob": -0.2860786020755768, + "compression_ratio": 1.2363636493682861, + "no_speech_prob": 0.00985979475080967 + }, + ... + ] + } + /audio/translations: + post: + operationId: createTranslation + tags: + - Audio + summary: Translates audio into English. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranslationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/CreateTranslationResponseJson" + - $ref: "#/components/schemas/CreateTranslationResponseVerboseJson" + x-oaiMeta: + name: Create translation + group: audio + returns: The translated text. + examples: + request: + curl: | + curl https://api.openai.com/v1/audio/translations \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/german.m4a" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.translations.create( + model="whisper-1", + file=audio_file + ) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const translation = await openai.audio.translations.create({ + file: fs.createReadStream("speech.mp3"), + model: "whisper-1", + }); + + console.log(translation.text); + } + main(); + response: > + { + "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?" 
+ } + /batches: + post: + summary: Creates and executes a batch from an uploaded file of requests + operationId: createBatch + tags: + - Batch + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - input_file_id + - endpoint + - completion_window + properties: + input_file_id: + type: string + description: > + The ID of an uploaded file that contains requests for the + new batch. + + + See [upload file](/docs/api-reference/files/create) for how + to upload a file. + + + Your input file must be formatted as a [JSONL + file](/docs/api-reference/batch/request-input), and must be + uploaded with the purpose `batch`. The file can contain up + to 50,000 requests, and can be up to 200 MB in size. + endpoint: + type: string + enum: + - /v1/chat/completions + - /v1/embeddings + - /v1/completions + description: The endpoint to be used for all requests in the batch. Currently + `/v1/chat/completions`, `/v1/embeddings`, and + `/v1/completions` are supported. Note that `/v1/embeddings` + batches are also restricted to a maximum of 50,000 embedding + inputs across all requests in the batch. + completion_window: + type: string + enum: + - 24h + description: The time frame within which the batch should be processed. + Currently only `24h` is supported. metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ListVectorStoresResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreObject' - first_id: - type: string - example: vs_abc123 - last_id: - type: string - example: vs_abc456 - has_more: - type: boolean - example: false - DeleteVectorStoreResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - vector_store.deleted - type: string - VectorStoreFileObject: - title: Vector store files - required: - - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store.file - type: string - description: 'The object type, which is always `vector_store.file`.' - usage_bytes: - type: integer - description: The total vector store usage in bytes. Note that this may be different from the original file size. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store file was created. - vector_store_id: - type: string - description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' - status: - enum: - - in_progress - - completed - - cancelled - - failed - type: string - description: 'The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use.' 
- last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - unsupported_file - - invalid_file - type: string - description: One of `server_error` or `rate_limit_exceeded`. - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this vector store file. Will be `null` if there are no errors. - nullable: true - chunking_strategy: - type: object - oneOf: - - $ref: '#/components/schemas/StaticChunkingStrategyResponseParam' - - $ref: '#/components/schemas/OtherChunkingStrategyResponseParam' - description: The strategy used to chunk the file. - x-oaiExpandable: true - description: A list of files attached to a vector store. - x-oaiMeta: - name: The vector store file object - beta: true - example: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"usage_bytes\": 1234,\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"last_error\": null,\n \"chunking_strategy\": {\n \"type\": \"static\",\n \"static\": {\n \"max_chunk_size_tokens\": 800,\n \"chunk_overlap_tokens\": 400\n }\n }\n}\n" - OtherChunkingStrategyResponseParam: - title: Other Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - other - type: string - description: Always `other`. - additionalProperties: false - description: 'This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.' - StaticChunkingStrategyResponseParam: - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - $ref: '#/components/schemas/StaticChunkingStrategy' - additionalProperties: false - StaticChunkingStrategy: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - AutoChunkingStrategyRequestParam: - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - StaticChunkingStrategyRequestParam: - title: Static Chunking Strategy - required: - - type - - static + type: object + additionalProperties: + type: string + description: Optional custom metadata for the batch. + nullable: true + responses: + "200": + description: Batch created successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Batch" + x-oaiMeta: + name: Create batch + group: batch + returns: The created [Batch](/docs/api-reference/batch/object) object. 
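The `input_file_id` above must point at a JSONL file uploaded with purpose `batch`, one request object per line. Below is a sketch of preparing and uploading such a file and then creating the batch; the per-line shape (`custom_id`, `method`, `url`, `body`) follows the batch request-input format linked in the description above, and the `requests` usage and file name are illustrative.

```python
import json
import os
import requests

API_KEY = os.environ["OPENAI_API_KEY"]

# One JSON object per line; custom_id lets you match results back to requests.
rows = [
    {
        "custom_id": f"request-{i}",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": prompt}],
        },
    }
    for i, prompt in enumerate(["Hello!", "Say this is a test."])
]
with open("batch_input.jsonl", "w") as f:
    f.write("\n".join(json.dumps(row) for row in rows))

# Upload with purpose "batch", then reference the returned file id as input_file_id.
with open("batch_input.jsonl", "rb") as f:
    uploaded = requests.post(
        "https://api.openai.com/v1/files",
        headers={"Authorization": f"Bearer {API_KEY}"},
        files={"file": f},
        data={"purpose": "batch"},
    ).json()

batch = requests.post(
    "https://api.openai.com/v1/batches",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "input_file_id": uploaded["id"],
        "endpoint": "/v1/chat/completions",
        "completion_window": "24h",
    },
).json()
print(batch["id"], batch["status"])
```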
+ examples: + request: + curl: | + curl https://api.openai.com/v1/batches \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "input_file_id": "file-abc123", + "endpoint": "/v1/chat/completions", + "completion_window": "24h" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.batches.create( + input_file_id="file-abc123", + endpoint="/v1/chat/completions", + completion_window="24h" + ) + node: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const batch = await openai.batches.create({ + input_file_id: "file-abc123", + endpoint: "/v1/chat/completions", + completion_window: "24h" + }); + + console.log(batch); + } + + main(); + response: | + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/chat/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "validating", + "output_file_id": null, + "error_file_id": null, + "created_at": 1711471533, + "in_progress_at": null, + "expires_at": null, + "finalizing_at": null, + "completed_at": null, + "failed_at": null, + "expired_at": null, + "cancelling_at": null, + "cancelled_at": null, + "request_counts": { + "total": 0, + "completed": 0, + "failed": 0 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly eval job", + } + } + get: + operationId: listBatches + tags: + - Batch + summary: List your organization's batches. + parameters: + - in: query + name: after + required: false + schema: + type: string + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: Batch listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ListBatchesResponse" + x-oaiMeta: + name: List batch + group: batch + returns: A list of paginated [Batch](/docs/api-reference/batch/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/batches?limit=2 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" + python: | + from openai import OpenAI + client = OpenAI() + + client.batches.list() + node: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.batches.list(); + + for await (const batch of list) { + console.log(batch); + } + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/chat/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "completed", + "output_file_id": "file-cvaTdG", + "error_file_id": "file-HOWS94", + "created_at": 1711471533, + "in_progress_at": 1711471538, + "expires_at": 1711557933, + "finalizing_at": 1711493133, + "completed_at": 1711493163, + "failed_at": null, + "expired_at": null, + "cancelling_at": null, + "cancelled_at": null, + "request_counts": { + "total": 100, + "completed": 95, + "failed": 5 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly job", + } + }, + { ... }, + ], + "first_id": "batch_abc123", + "last_id": "batch_abc456", + "has_more": true + } + /batches/{batch_id}: + get: + operationId: retrieveBatch + tags: + - Batch + summary: Retrieves a batch. + parameters: + - in: path + name: batch_id + required: true + schema: + type: string + description: The ID of the batch to retrieve. + responses: + "200": + description: Batch retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Batch" + x-oaiMeta: + name: Retrieve batch + group: batch + returns: The [Batch](/docs/api-reference/batch/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/batches/batch_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + python: | + from openai import OpenAI + client = OpenAI() + + client.batches.retrieve("batch_abc123") + node: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const batch = await openai.batches.retrieve("batch_abc123"); + + console.log(batch); + } + + main(); + response: | + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "completed", + "output_file_id": "file-cvaTdG", + "error_file_id": "file-HOWS94", + "created_at": 1711471533, + "in_progress_at": 1711471538, + "expires_at": 1711557933, + "finalizing_at": 1711493133, + "completed_at": 1711493163, + "failed_at": null, + "expired_at": null, + "cancelling_at": null, + "cancelled_at": null, + "request_counts": { + "total": 100, + "completed": 95, + "failed": 5 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly eval job", + } + } + /batches/{batch_id}/cancel: + post: + operationId: cancelBatch + tags: + - Batch + summary: Cancels an in-progress batch. The batch will be in status `cancelling` + for up to 10 minutes, before changing to `cancelled`, where it will have + partial results (if any) available in the output file. + parameters: + - in: path + name: batch_id + required: true + schema: + type: string + description: The ID of the batch to cancel. + responses: + "200": + description: Batch is cancelling. Returns the cancelling batch's details. 
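A batch is typically polled through the retrieve endpoint above until it stops progressing, after which its `output_file_id` can be downloaded. Below is a minimal polling sketch; the terminal-status set, the sleep interval, and the use of the file-content endpoint defined elsewhere in this spec are assumptions for illustration.

```python
import os
import time
import requests

HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}

def wait_for_batch(batch_id: str, poll_seconds: int = 60) -> dict:
    """Poll GET /v1/batches/{batch_id} until the batch stops progressing."""
    terminal = {"completed", "failed", "expired", "cancelled"}  # assumed terminal set
    while True:
        batch = requests.get(
            f"https://api.openai.com/v1/batches/{batch_id}", headers=HEADERS
        ).json()
        if batch["status"] in terminal:
            return batch
        time.sleep(poll_seconds)

done = wait_for_batch("batch_abc123")
if done.get("output_file_id"):
    # Results are JSONL, one response object per line.
    results = requests.get(
        f"https://api.openai.com/v1/files/{done['output_file_id']}/content",
        headers=HEADERS,
    ).text
    print(results.splitlines()[0])
```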
+ content: + application/json: + schema: + $ref: "#/components/schemas/Batch" + x-oaiMeta: + name: Cancel batch + group: batch + returns: The [Batch](/docs/api-reference/batch/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/batches/batch_abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -X POST + python: | + from openai import OpenAI + client = OpenAI() + + client.batches.cancel("batch_abc123") + node: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const batch = await openai.batches.cancel("batch_abc123"); + + console.log(batch); + } + + main(); + response: | + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/chat/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "cancelling", + "output_file_id": null, + "error_file_id": null, + "created_at": 1711471533, + "in_progress_at": 1711471538, + "expires_at": 1711557933, + "finalizing_at": null, + "completed_at": null, + "failed_at": null, + "expired_at": null, + "cancelling_at": 1711475133, + "cancelled_at": null, + "request_counts": { + "total": 100, + "completed": 23, + "failed": 1 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly eval job", + } + } + /chat/completions: + post: + operationId: createChatCompletion + tags: + - Chat + summary: > + Creates a model response for the given chat conversation. Learn more in + the + + [text generation](/docs/guides/text-generation), + [vision](/docs/guides/vision), + + and [audio](/docs/guides/audio) guides. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateChatCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateChatCompletionResponse" + x-oaiMeta: + name: Create chat completion + group: chat + returns: > + Returns a [chat completion](/docs/api-reference/chat/object) object, + or a streamed sequence of [chat completion + chunk](/docs/api-reference/chat/streaming) objects if the request is + streamed. + path: create + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_chat_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" + } + ] + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + completion = client.chat.completions.create( + model="VAR_chat_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ] + ) + + + print(completion.choices[0].message) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "system", content: "You are a helpful assistant." 
}], + model: "VAR_chat_model_id", + }); + + console.log(completion.choices[0]); + } + + + main(); + response: | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?", + }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + - title: Image input + request: + curl: > + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What'\''s in this image?" + }, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + } + ] + } + ], + "max_tokens": 300 + }' + python: > + from openai import OpenAI + + + client = OpenAI() + + + response = client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + } + }, + ], + } + ], + max_tokens=300, + ) + + + print(response.choices[0]) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const response = await openai.chat.completions.create({ + model: "gpt-4o", + messages: [ + { + role: "user", + content: [ + { type: "text", text: "What's in this image?" }, + { + type: "image_url", + image_url: { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + } + ], + }, + ], + }); + console.log(response.choices[0]); + } + + main(); + response: > + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nThis image shows a wooden boardwalk extending through a lush green marshland.", + }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_chat_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" 
+ } + ], + "stream": true + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + completion = client.chat.completions.create( + model="VAR_chat_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream=True + ) + + + for chunk in completion: + print(chunk.choices[0].delta) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const completion = await openai.chat.completions.create({ + model: "VAR_chat_model_id", + messages: [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream: true, + }); + + for await (const chunk of completion) { + console.log(chunk.choices[0].delta.content); + } + } + + + main(); + response: > + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + + + .... + + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + - title: Functions + request: + curl: > + curl https://api.openai.com/v1/chat/completions \ + + -H "Content-Type: application/json" \ + + -H "Authorization: Bearer $OPENAI_API_KEY" \ + + -d '{ + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "What'\''s the weather like in Boston today?" + } + ], + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "tool_choice": "auto" + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + messages = [{"role": "user", "content": "What's the weather like + in Boston today?"}] + + completion = client.chat.completions.create( + model="VAR_chat_model_id", + messages=messages, + tools=tools, + tool_choice="auto" + ) + + + print(completion) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + const response = await openai.chat.completions.create({ + model: "gpt-4o", + messages: messages, + tools: tools, + tool_choice: "auto", + }); + + console.log(response); + } + + + main(); + response: | + { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1699896916, + "model": "gpt-4o-mini", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\n\"location\": \"Boston, MA\"\n}" + } + } + ] + }, + "logprobs": null, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 82, + "completion_tokens": 17, + "total_tokens": 99, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + - title: Logprobs + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_chat_model_id", + "messages": [ + { + "role": "user", + "content": "Hello!" + } + ], + "logprobs": true, + "top_logprobs": 2 + }' + python: | + from openai import OpenAI + client = OpenAI() + + completion = client.chat.completions.create( + model="VAR_chat_model_id", + messages=[ + {"role": "user", "content": "Hello!"} + ], + logprobs=True, + top_logprobs=2 + ) + + print(completion.choices[0].message) + print(completion.choices[0].logprobs) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "user", content: "Hello!" }], + model: "VAR_chat_model_id", + logprobs: true, + top_logprobs: 2, + }); + + console.log(completion.choices[0]); + } + + main(); + response: | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1702685778, + "model": "gpt-4o-mini", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello! How can I assist you today?" 
+ }, + "logprobs": { + "content": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111], + "top_logprobs": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111] + }, + { + "token": "Hi", + "logprob": -1.3190403, + "bytes": [72, 105] + } + ] + }, + { + "token": "!", + "logprob": -0.02380986, + "bytes": [ + 33 + ], + "top_logprobs": [ + { + "token": "!", + "logprob": -0.02380986, + "bytes": [33] + }, + { + "token": " there", + "logprob": -3.787621, + "bytes": [32, 116, 104, 101, 114, 101] + } + ] + }, + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119], + "top_logprobs": [ + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119] + }, + { + "token": "<|end|>", + "logprob": -10.953937, + "bytes": null + } + ] + }, + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110], + "top_logprobs": [ + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110] + }, + { + "token": " may", + "logprob": -4.161023, + "bytes": [32, 109, 97, 121] + } + ] + }, + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [ + 32, + 73 + ], + "top_logprobs": [ + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [32, 73] + }, + { + "token": " assist", + "logprob": -13.596657, + "bytes": [32, 97, 115, 115, 105, 115, 116] + } + ] + }, + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116], + "top_logprobs": [ + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116] + }, + { + "token": " help", + "logprob": -3.1089056, + "bytes": [32, 104, 101, 108, 112] + } + ] + }, + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117], + "top_logprobs": [ + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117] + }, + { + "token": " today", + "logprob": -12.807695, + "bytes": [32, 116, 111, 100, 97, 121] + } + ] + }, + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121], + "top_logprobs": [ + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121] + }, + { + "token": "?", + "logprob": -5.5247097, + "bytes": [63] + } + ] + }, + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63], + "top_logprobs": [ + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63] + }, + { + "token": "?\n", + "logprob": -7.184561, + "bytes": [63, 10] + } + ] + } + ] + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 9, + "total_tokens": 18, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "system_fingerprint": null + } + /completions: + post: + operationId: createCompletion + tags: + - Completions + summary: Creates a completion for the provided prompt and parameters. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionResponse" + x-oaiMeta: + name: Create completion + group: completions + returns: > + Returns a [completion](/docs/api-reference/completions/object) object, + or a sequence of completion objects if the request is streamed. 
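The `logprob` values in the chat completion response above are natural-log probabilities, so exponentiating them recovers the model's probability for each token. Below is a small sketch of reading them out of a parsed response; the helper name is illustrative.

```python
import math

def token_probabilities(completion: dict) -> list[tuple[str, float]]:
    """Turn the per-token logprobs of choice 0 into (token, probability) pairs."""
    content = completion["choices"][0]["logprobs"]["content"]
    return [(item["token"], math.exp(item["logprob"])) for item in content]

# For the response above: math.exp(-0.31725305) ~= 0.728, i.e. "Hello" had roughly a 73% probability.
```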
+ legacy: true + examples: + - title: No streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_completion_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0 + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.completions.create( + model="VAR_completion_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0 + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.completions.create({ + model: "VAR_completion_model_id", + prompt: "Say this is a test.", + max_tokens: 7, + temperature: 0, + }); + + console.log(completion); + } + main(); + response: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "VAR_completion_model_id", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_completion_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0, + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + for chunk in client.completions.create( + model="VAR_completion_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0, + stream=True + ): + print(chunk.choices[0].text) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.completions.create({ + model: "VAR_completion_model_id", + prompt: "Say this is a test.", + stream: true, + }); + + for await (const chunk of stream) { + console.log(chunk.choices[0].text) + } + } + main(); + response: | + { + "id": "cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe", + "object": "text_completion", + "created": 1690759702, + "choices": [ + { + "text": "This", + "index": 0, + "logprobs": null, + "finish_reason": null + } + ], + "model": "gpt-3.5-turbo-instruct" + "system_fingerprint": "fp_44709d6fcb", + } + /embeddings: + post: + operationId: createEmbedding + tags: + - Embeddings + summary: Creates an embedding vector representing the input text. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingResponse" + x-oaiMeta: + name: Create embeddings + group: embeddings + returns: A list of [embedding](/docs/api-reference/embeddings/object) objects. 
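+      # Illustrative sketch (cosine similarity is a common downstream use and
+      # is not defined by this endpoint; the inputs are arbitrary examples):
+      # embed two strings in one request and score their similarity.
+      #
+      #   from math import sqrt
+      #   from openai import OpenAI
+      #   client = OpenAI()
+      #
+      #   resp = client.embeddings.create(
+      #       model="text-embedding-ada-002",
+      #       input=["The food was delicious", "The meal tasted great"],
+      #   )
+      #   a, b = (d.embedding for d in resp.data)
+      #   dot = sum(x * y for x, y in zip(a, b))
+      #   norm = sqrt(sum(x * x for x in a)) * sqrt(sum(y * y for y in b))
+      #   print(dot / norm)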
+ examples: + request: + curl: | + curl https://api.openai.com/v1/embeddings \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "input": "The food was delicious and the waiter...", + "model": "text-embedding-ada-002", + "encoding_format": "float" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.embeddings.create( + model="text-embedding-ada-002", + input="The food was delicious and the waiter...", + encoding_format="float" + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const embedding = await openai.embeddings.create({ + model: "text-embedding-ada-002", + input: "The quick brown fox jumped over the lazy dog", + encoding_format: "float", + }); + + console.log(embedding); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + ], + "model": "text-embedding-ada-002", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + /files: + get: + operationId: listFiles + tags: + - Files + summary: Returns a list of files. + parameters: + - in: query + name: purpose + required: false + schema: + type: string + description: Only return files with the given purpose. + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 10,000, and the default is 10,000. + required: false + schema: + type: integer + default: 10000 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFilesResponse" + x-oaiMeta: + name: List files + group: files + returns: A list of [File](/docs/api-reference/files/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.files.list(); + + for await (const file of list) { + console.log(file); + } + } + + main(); + response: | + { + "data": [ + { + "id": "file-abc123", + "object": "file", + "bytes": 175, + "created_at": 1613677385, + "filename": "salesOverview.pdf", + "purpose": "assistants", + }, + { + "id": "file-abc123", + "object": "file", + "bytes": 140, + "created_at": 1613779121, + "filename": "puppy.jsonl", + "purpose": "fine-tune", + } + ], + "object": "list" + } + post: + operationId: createFile + tags: + - Files + summary: > + Upload a file that can be used across various endpoints. Individual + files can be up to 512 MB, and the size of all files uploaded by one + organization can be up to 100 GB. 
+ + + The Assistants API supports files up to 2 million tokens and of specific + file types. See the [Assistants Tools guide](/docs/assistants/tools) for + details. + + + The Fine-tuning API only supports `.jsonl` files. The input also has + certain required formats for fine-tuning + [chat](/docs/api-reference/fine-tuning/chat-input) or + [completions](/docs/api-reference/fine-tuning/completions-input) models. + + + The Batch API only supports `.jsonl` files up to 200 MB in size. The + input also has a specific required + [format](/docs/api-reference/batch/request-input). + + + Please [contact us](https://help.openai.com/) if you need to increase + these storage limits. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateFileRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/OpenAIFile" + x-oaiMeta: + name: Upload file + group: files + returns: The uploaded [File](/docs/api-reference/files/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F purpose="fine-tune" \ + -F file="@mydata.jsonl" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.create( + file=open("mydata.jsonl", "rb"), + purpose="fine-tune" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.create({ + file: fs.createReadStream("mydata.jsonl"), + purpose: "fine-tune", + }); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "mydata.jsonl", + "purpose": "fine-tune", + } + /files/{file_id}: + delete: + operationId: deleteFile + tags: + - Files + summary: Delete a file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteFileResponse" + x-oaiMeta: + name: Delete file + group: files + returns: Deletion status. + examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123 \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.delete("file-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.del("file-abc123"); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "deleted": true + } + get: + operationId: retrieveFile + tags: + - Files + summary: Returns information about a specific file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/OpenAIFile" + x-oaiMeta: + name: Retrieve file + group: files + returns: The [File](/docs/api-reference/files/object) object matching the + specified ID. 
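+      # Illustrative sketch (not part of the spec; the file name is a
+      # placeholder taken from the examples): upload a `.jsonl` file for
+      # fine-tuning, then read its metadata back with the retrieve endpoint.
+      #
+      #   from openai import OpenAI
+      #   client = OpenAI()
+      #
+      #   uploaded = client.files.create(
+      #       file=open("mydata.jsonl", "rb"),
+      #       purpose="fine-tune",
+      #   )
+      #   info = client.files.retrieve(uploaded.id)
+      #   print(info.filename, info.bytes, info.purpose)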
+ examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.retrieve("file-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.retrieve("file-abc123"); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "mydata.jsonl", + "purpose": "fine-tune", + } + /files/{file_id}/content: + get: + operationId: downloadFile + tags: + - Files + summary: Returns the contents of the specified file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + type: string + x-oaiMeta: + name: Retrieve file content + group: files + returns: The file content. + examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123/content \ + -H "Authorization: Bearer $OPENAI_API_KEY" > file.jsonl + python: | + from openai import OpenAI + client = OpenAI() + + content = client.files.content("file-abc123") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.content("file-abc123"); + + console.log(file); + } + + main(); + /fine_tuning/jobs: + post: + operationId: createFineTuningJob + tags: + - Fine-tuning + summary: > + Creates a fine-tuning job which begins the process of creating a new + model from a given dataset. + + + Response includes details of the enqueued job including job status and + the name of the fine-tuned models once complete. + + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateFineTuningJobRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Create fine-tuning job + group: fine-tuning + returns: A [fine-tuning.job](/docs/api-reference/fine-tuning/object) object. 
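+      # Illustrative sketch (not part of the spec): chain the file upload and
+      # job creation calls shown in the examples; the training file name is a
+      # placeholder.
+      #
+      #   from openai import OpenAI
+      #   client = OpenAI()
+      #
+      #   training = client.files.create(
+      #       file=open("mydata.jsonl", "rb"),
+      #       purpose="fine-tune",
+      #   )
+      #   job = client.fine_tuning.jobs.create(
+      #       training_file=training.id,
+      #       model="gpt-4o-mini",
+      #   )
+      #   print(job.id, job.status)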
+ examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "model": "gpt-4o-mini" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-4o-mini" + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + } + - title: Epochs + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "model": "gpt-4o-mini", + "hyperparameters": { + "n_epochs": 2 + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-4o-mini", + hyperparameters={ + "n_epochs":2 + } + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + model: "gpt-4o-mini", + hyperparameters: { n_epochs: 2 } + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": {"n_epochs": 2}, + } + - title: Validation file + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-4o-mini" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + validation_file="file-def456", + model="gpt-4o-mini" + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + validation_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + } + - title: W&B Integration + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-4o-mini", + "integrations": [ + { + "type": "wandb", + "wandb": { + "project": 
"my-wandb-project", + "name": "ft-run-display-name" + "tags": [ + "first-experiment", "v2" + ] + } + } + ] + }' + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + "integrations": [ + { + "type": "wandb", + "wandb": { + "project": "my-wandb-project", + "entity": None, + "run_id": "ftjob-abc123" + } + } + ] + } + get: + operationId: listPaginatedFineTuningJobs + tags: + - Fine-tuning + summary: | + List your organization's fine-tuning jobs + parameters: + - name: after + in: query + description: Identifier for the last job from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of fine-tuning jobs to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListPaginatedFineTuningJobsResponse" + x-oaiMeta: + name: List fine-tuning jobs + group: fine-tuning + returns: A list of paginated [fine-tuning + job](/docs/api-reference/fine-tuning/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.fineTuning.jobs.list(); + + for await (const fineTune of list) { + console.log(fineTune); + } + } + + main(); + response: > + { + "object": "list", + "data": [ + { + "object": "fine_tuning.job.event", + "id": "ft-event-TjX0lMfOniCZX64t9PUQT5hn", + "created_at": 1689813489, + "level": "warn", + "message": "Fine tuning process stopping due to job cancellation", + "data": null, + "type": "message" + }, + { ... }, + { ... } + ], "has_more": true + } + /fine_tuning/jobs/{fine_tuning_job_id}: + get: + operationId: retrieveFineTuningJob + tags: + - Fine-tuning + summary: | + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Retrieve fine-tuning job + group: fine-tuning + returns: The [fine-tuning](/docs/api-reference/fine-tuning/object) object with + the given ID. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F + \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.retrieve("ftjob-abc123") + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); + + console.log(fineTune); + } + + + main(); + response: > + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "davinci-002", + "created_at": 1692661014, + "finished_at": 1692661190, + "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", + "organization_id": "org-123", + "result_files": [ + "file-abc123" + ], + "status": "succeeded", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": { + "n_epochs": 4, + "batch_size": 1, + "learning_rate_multiplier": 1.0 + }, + "trained_tokens": 5768, + "integrations": [], + "seed": 0, + "estimated_finish": 0 + } + /fine_tuning/jobs/{fine_tuning_job_id}/cancel: + post: + operationId: cancelFineTuningJob + tags: + - Fine-tuning + summary: | + Immediately cancel a fine-tune job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to cancel. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Cancel fine-tuning + group: fine-tuning + returns: The cancelled [fine-tuning](/docs/api-reference/fine-tuning/object) + object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.cancel("ftjob-abc123") + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "hyperparameters": { + "n_epochs": "auto" + }, + "status": "cancelled", + "validation_file": "file-abc123", + "training_file": "file-abc123" + } + /fine_tuning/jobs/{fine_tuning_job_id}/checkpoints: + get: + operationId: listFineTuningJobCheckpoints + tags: + - Fine-tuning + summary: | + List checkpoints for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get checkpoints for. + - name: after + in: query + description: Identifier for the last checkpoint ID from the previous pagination + request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of checkpoints to retrieve. 
+ required: false + schema: + type: integer + default: 10 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobCheckpointsResponse" + x-oaiMeta: + name: List fine-tuning checkpoints + group: fine-tuning + returns: A list of fine-tuning [checkpoint + objects](/docs/api-reference/fine-tuning/checkpoint-object) for a + fine-tuning job. + examples: + request: + curl: > + curl + https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/checkpoints + \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: > + { + "object": "list" + "data": [ + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", + "created_at": 1721764867, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000", + "metrics": { + "full_valid_loss": 0.134, + "full_valid_mean_token_accuracy": 0.874 + }, + "fine_tuning_job_id": "ftjob-abc123", + "step_number": 2000, + }, + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", + "created_at": 1721764800, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "metrics": { + "full_valid_loss": 0.167, + "full_valid_mean_token_accuracy": 0.781 + }, + "fine_tuning_job_id": "ftjob-abc123", + "step_number": 1000, + }, + ], + "first_id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", + "last_id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", + "has_more": true + } + /fine_tuning/jobs/{fine_tuning_job_id}/events: + get: + operationId: listFineTuningEvents + tags: + - Fine-tuning + summary: | + Get status updates for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get events for. + - name: after + in: query + description: Identifier for the last event from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of events to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobEventsResponse" + x-oaiMeta: + name: List fine-tuning events + group: fine-tuning + returns: A list of fine-tuning event objects. 
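+      # Illustrative sketch (assumptions: the Python SDK exposes this
+      # endpoint's `after` and `limit` query parameters as keyword arguments,
+      # and the returned page mirrors the `data`/`has_more` fields shown in
+      # the example response that follows): walk every page of events for a job.
+      #
+      #   from openai import OpenAI
+      #   client = OpenAI()
+      #
+      #   after = None
+      #   while True:
+      #       page = client.fine_tuning.jobs.list_events(
+      #           fine_tuning_job_id="ftjob-abc123",
+      #           limit=20,
+      #           **({"after": after} if after else {}),
+      #       )
+      #       for event in page.data:
+      #           print(event.level, event.message)
+      #       if not page.has_more:
+      #           break
+      #       after = page.data[-1].id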
+ examples:
+ request:
+ curl: >
+ curl
+ https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \
+ -H "Authorization: Bearer $OPENAI_API_KEY"
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ client.fine_tuning.jobs.list_events(
+ fine_tuning_job_id="ftjob-abc123",
+ limit=2
+ )
+ node.js: >-
+ import OpenAI from "openai";
+
+
+ const openai = new OpenAI();
+
+
+ async function main() {
+ const list = await openai.fineTuning.jobs.listEvents("ftjob-abc123", { limit: 2 });
+
+ for await (const fineTune of list) {
+ console.log(fineTune);
+ }
+ }
+
+
+ main();
+ response: >
+ {
+ "object": "list",
+ "data": [
+ {
+ "object": "fine_tuning.job.event",
+ "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm",
+ "created_at": 1721764800,
+ "level": "info",
+ "message": "Fine tuning job successfully completed",
+ "data": null,
+ "type": "message"
+ },
+ {
+ "object": "fine_tuning.job.event",
+ "id": "ft-event-tyiGuB72evQncpH87xe505Sv",
+ "created_at": 1721764800,
+ "level": "info",
+ "message": "New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel",
+ "data": null,
+ "type": "message"
+ }
+ ],
+ "has_more": true
+ }
+ /images/edits:
+ post:
+ operationId: createImageEdit
+ tags:
+ - Images
+ summary: Creates an edited or extended image given an original image and a prompt.
+ requestBody:
+ required: true
+ content:
+ multipart/form-data:
+ schema:
+ $ref: "#/components/schemas/CreateImageEditRequest"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ImagesResponse"
+ x-oaiMeta:
+ name: Create image edit
+ group: images
+ returns: Returns a list of [image](/docs/api-reference/images/object) objects.
+ examples:
+ request:
+ curl: |
+ curl https://api.openai.com/v1/images/edits \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -F image="@otter.png" \
+ -F mask="@mask.png" \
+ -F prompt="A cute baby sea otter wearing a beret" \
+ -F n=2 \
+ -F size="1024x1024"
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ client.images.edit(
+ image=open("otter.png", "rb"),
+ mask=open("mask.png", "rb"),
+ prompt="A cute baby sea otter wearing a beret",
+ n=2,
+ size="1024x1024"
+ )
+ node.js: |-
+ import fs from "fs";
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const image = await openai.images.edit({
+ image: fs.createReadStream("otter.png"),
+ mask: fs.createReadStream("mask.png"),
+ prompt: "A cute baby sea otter wearing a beret",
+ });
+
+ console.log(image.data);
+ }
+ main();
+ response: |
+ {
+ "created": 1589478378,
+ "data": [
+ {
+ "url": "https://..."
+ },
+ {
+ "url": "https://..."
+ }
+ ]
+ }
+ /images/generations:
+ post:
+ operationId: createImage
+ tags:
+ - Images
+ summary: Creates an image given a prompt.
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/CreateImageRequest"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ImagesResponse"
+ x-oaiMeta:
+ name: Create image
+ group: images
+ returns: Returns a list of [image](/docs/api-reference/images/object) objects.
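+      # Illustrative sketch (not part of the spec; downloading the URL with
+      # the standard library and the output filename are arbitrary choices):
+      # generate one image and save it locally.
+      #
+      #   import urllib.request
+      #   from openai import OpenAI
+      #   client = OpenAI()
+      #
+      #   result = client.images.generate(
+      #       model="dall-e-3",
+      #       prompt="A cute baby sea otter",
+      #       n=1,
+      #       size="1024x1024",
+      #   )
+      #   urllib.request.urlretrieve(result.data[0].url, "otter.png")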
+ examples: + request: + curl: | + curl https://api.openai.com/v1/images/generations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "dall-e-3", + "prompt": "A cute baby sea otter", + "n": 1, + "size": "1024x1024" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); + + console.log(image.data); + } + + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + /images/variations: + post: + operationId: createImageVariation + tags: + - Images + summary: Creates a variation of a given image. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateImageVariationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image variation + group: images + returns: Returns a list of [image](/docs/api-reference/images/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/images/variations \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F image="@otter.png" \ + -F n=2 \ + -F size="1024x1024" + python: | + from openai import OpenAI + client = OpenAI() + + response = client.images.create_variation( + image=open("image_edit_original.png", "rb"), + n=2, + size="1024x1024" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.createVariation({ + image: fs.createReadStream("otter.png"), + }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + /models: + get: + operationId: listModels + tags: + - Models + summary: Lists the currently available models, and provides basic information + about each one such as the owner and availability. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListModelsResponse" + x-oaiMeta: + name: List models + group: models + returns: A list of [model](/docs/api-reference/models/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.models.list(); + + for await (const model of list) { + console.log(model); + } + } + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "model-id-0", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner" + }, + { + "id": "model-id-1", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner", + }, + { + "id": "model-id-2", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + }, + ], + "object": "list" + } + /models/{model}: + get: + operationId: retrieveModel + tags: + - Models + summary: Retrieves a model instance, providing basic information about the model + such as the owner and permissioning. + parameters: + - in: path + name: model + required: true + schema: + type: string + example: gpt-4o-mini + description: The ID of the model to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Model" + x-oaiMeta: + name: Retrieve model + group: models + returns: The [model](/docs/api-reference/models/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/models/VAR_chat_model_id \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.retrieve("VAR_chat_model_id") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const model = await openai.models.retrieve("VAR_chat_model_id"); + + console.log(model); + } + + main(); + response: | + { + "id": "VAR_chat_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + } + delete: + operationId: deleteModel + tags: + - Models + summary: Delete a fine-tuned model. You must have the Owner role in your + organization to delete a model. + parameters: + - in: path + name: model + required: true + schema: + type: string + example: ft:gpt-4o-mini:acemeco:suffix:abc123 + description: The model to delete + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteModelResponse" + x-oaiMeta: + name: Delete a fine-tuned model + group: models + returns: Deletion status. + examples: + request: + curl: > + curl + https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 + \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123") + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const model = await openai.models.del("ft:gpt-4o-mini:acemeco:suffix:abc123"); + + console.log(model); + } + + main(); + response: | + { + "id": "ft:gpt-4o-mini:acemeco:suffix:abc123", + "object": "model", + "deleted": true + } + /moderations: + post: + operationId: createModeration + tags: + - Moderations + summary: | + Classifies if text and/or image inputs are potentially harmful. Learn + more in the [moderation guide](/docs/guides/moderation). 
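+      # Illustrative sketch (not part of the spec): gate an input on the
+      # `flagged` field of the moderation result shown in the examples below.
+      #
+      #   from openai import OpenAI
+      #   client = OpenAI()
+      #
+      #   moderation = client.moderations.create(
+      #       model="omni-moderation-latest",
+      #       input="...text to classify goes here...",
+      #   )
+      #   result = moderation.results[0]
+      #   print("blocked" if result.flagged else "allowed", result.categories)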
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationResponse" + x-oaiMeta: + name: Create moderation + group: moderations + returns: A [moderation](/docs/api-reference/moderations/object) object. + examples: + - title: Single string + request: + curl: | + curl https://api.openai.com/v1/moderations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "input": "I want to kill them." + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + moderation = client.moderations.create(input="I want to kill + them.") + + print(moderation) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const moderation = await openai.moderations.create({ input: "I want to kill them." }); + + console.log(moderation); + } + + main(); + response: | + { + "id": "modr-AB8CjOTu2jiq12hp1AQPfeqFWaORR", + "model": "text-moderation-007", + "results": [ + { + "flagged": true, + "categories": { + "sexual": false, + "hate": false, + "harassment": true, + "self-harm": false, + "sexual/minors": false, + "hate/threatening": false, + "violence/graphic": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "harassment/threatening": true, + "violence": true + }, + "category_scores": { + "sexual": 0.000011726012417057063, + "hate": 0.22706663608551025, + "harassment": 0.5215635299682617, + "self-harm": 2.227119921371923e-6, + "sexual/minors": 7.107352217872176e-8, + "hate/threatening": 0.023547329008579254, + "violence/graphic": 0.00003391829886822961, + "self-harm/intent": 1.646940972932498e-6, + "self-harm/instructions": 1.1198755256458526e-9, + "harassment/threatening": 0.5694745779037476, + "violence": 0.9971134662628174 + } + } + ] + } + - title: Image and text + request: + curl: > + curl https://api.openai.com/v1/moderations \ + -X POST \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "omni-moderation-latest", + "input": [ + { "type": "text", "text": "...text to classify goes here..." }, + { + "type": "image_url", + "image_url": { + "url": "https://example.com/image.png" + } + } + ] + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + response = client.moderations.create( + model="omni-moderation-latest", + input=[ + {"type": "text", "text": "...text to classify goes here..."}, + { + "type": "image_url", + "image_url": { + "url": "https://example.com/image.png", + # can also use base64 encoded image URLs + # "url": "data:image/jpeg;base64,abcdefg..." + } + }, + ], + ) + + + print(response) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + const moderation = await openai.moderations.create({ + model: "omni-moderation-latest", + input: [ + { type: "text", text: "...text to classify goes here..." }, + { + type: "image_url", + image_url: { + url: "https://example.com/image.png" + // can also use base64 encoded image URLs + // url: "data:image/jpeg;base64,abcdefg..." 
+ } + } + ], + }); + + + console.log(moderation); + response: | + { + "id": "modr-0d9740456c391e43c445bf0f010940c7", + "model": "omni-moderation-latest", + "results": [ + { + "flagged": true, + "categories": { + "harassment": true, + "harassment/threatening": true, + "sexual": false, + "hate": false, + "hate/threatening": false, + "illicit": false, + "illicit/violent": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "self-harm": false, + "sexual/minors": false, + "violence": true, + "violence/graphic": true + }, + "category_scores": { + "harassment": 0.8189693396524255, + "harassment/threatening": 0.804985420696006, + "sexual": 1.573112165348997e-6, + "hate": 0.007562942636942845, + "hate/threatening": 0.004208854591835476, + "illicit": 0.030535955153511665, + "illicit/violent": 0.008925306722380033, + "self-harm/intent": 0.00023023930975076432, + "self-harm/instructions": 0.0002293869201073356, + "self-harm": 0.012598046106750154, + "sexual/minors": 2.212566909570261e-8, + "violence": 0.9999992735124786, + "violence/graphic": 0.843064871157054 + }, + "category_applied_input_types": { + "harassment": [ + "text" + ], + "harassment/threatening": [ + "text" + ], + "sexual": [ + "text", + "image" + ], + "hate": [ + "text" + ], + "hate/threatening": [ + "text" + ], + "illicit": [ + "text" + ], + "illicit/violent": [ + "text" + ], + "self-harm/intent": [ + "text", + "image" + ], + "self-harm/instructions": [ + "text", + "image" + ], + "self-harm": [ + "text", + "image" + ], + "sexual/minors": [ + "text" + ], + "violence": [ + "text", + "image" + ], + "violence/graphic": [ + "text", + "image" + ] + } + } + ] + } + /organization/audit_logs: + get: + summary: List user actions and configuration changes within this organization. + operationId: list-audit-logs + tags: + - Audit Logs + parameters: + - name: effective_at + in: query + description: Return only events whose `effective_at` (Unix seconds) is in this + range. + required: false + schema: type: object properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - $ref: '#/components/schemas/StaticChunkingStrategy' - additionalProperties: false - ChunkingStrategyRequestParam: - type: object + gt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater + than this value. + gte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater + than or equal to this value. + lt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than + this value. + lte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than + or equal to this value. + - name: project_ids[] + in: query + description: Return only events for these projects. + required: false + schema: + type: array + items: + type: string + - name: event_types[] + in: query + description: Return only events with a `type` in one of these values. For + example, `project.created`. For all options, see the documentation + for the [audit log object](/docs/api-reference/audit-logs/object). + required: false + schema: + type: array + items: + $ref: "#/components/schemas/AuditLogEventType" + - name: actor_ids[] + in: query + description: Return only events performed by these actors. Can be a user ID, a + service account ID, or an api key tracking ID. 
+ required: false + schema: + type: array + items: + type: string + - name: actor_emails[] + in: query + description: Return only events performed by users with these emails. + required: false + schema: + type: array + items: + type: string + - name: resource_ids[] + in: query + description: Return only events performed on these targets. For example, a + project ID updated. + required: false + schema: + type: array + items: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + responses: + "200": + description: Audit logs listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ListAuditLogsResponse" + x-oaiMeta: + name: List audit logs + group: audit-logs + returns: A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) + objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/audit_logs \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: > + { + "object": "list", + "data": [ + { + "id": "audit_log-xxx_yyyymmdd", + "type": "project.archived", + "effective_at": 1722461446, + "actor": { + "type": "api_key", + "api_key": { + "type": "user", + "user": { + "id": "user-xxx", + "email": "user@example.com" + } + } + }, + "project.archived": { + "id": "proj_abc" + }, + }, + { + "id": "audit_log-yyy__20240101", + "type": "api_key.updated", + "effective_at": 1720804190, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.updated": { + "id": "key_xxxx", + "data": { + "scopes": ["resource_2.operation_2"] + } + }, + } + ], + "first_id": "audit_log-xxx__20240101", + "last_id": "audit_log_yyy__20240101", + "has_more": true + } + /organization/costs: + get: + summary: Get costs details for the organization. + operationId: usage-costs + tags: + - Usage + parameters: + - name: start_time + in: query + description: Start time (Unix seconds) of the query time range, inclusive. + required: true + schema: + type: integer + - name: end_time + in: query + description: End time (Unix seconds) of the query time range, exclusive. + required: false + schema: + type: integer + - name: bucket_width + in: query + description: Width of each time bucket in response. Currently only `1d` is + supported, default to `1d`. + required: false + schema: + type: string + enum: + - 1d + default: 1d + - name: project_ids + in: query + description: Return only costs for these projects. 
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ - name: group_by
+ in: query
+ description: Group the costs by the specified fields. Supported fields include
+ `project_id`, `line_item` and any combination of them.
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - project_id
+ - line_item
+ - name: limit
+ in: query
+ description: >
+ A limit on the number of buckets to be returned. Limit can range
+ between 1 and 180, and the default is 7.
+ required: false
+ schema:
+ type: integer
+ default: 7
+ - name: page
+ in: query
+ description: A cursor for use in pagination, corresponding to the `next_page`
+ field from the previous response.
+ schema:
+ type: string
+ responses:
+ "200":
+ description: Costs data retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/UsageResponse"
+ x-oaiMeta:
+ name: Costs
+ group: usage-costs
+ returns: A list of paginated, time-bucketed
+ [Costs](/docs/api-reference/usage/costs_object) objects.
+ examples:
+ request:
+ curl: >
+ curl
+ "https://api.openai.com/v1/organization/costs?start_time=1730419200&limit=1"
+ \
+
+ -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+ -H "Content-Type: application/json"
+ response: |
+ {
+ "object": "page",
+ "data": [
+ {
+ "object": "bucket",
+ "start_time": 1730419200,
+ "end_time": 1730505600,
+ "results": [
+ {
+ "object": "organization.costs.result",
+ "amount": {
+ "value": 0.06,
+ "currency": "usd"
+ },
+ "line_item": null,
+ "project_id": null
+ }
+ ]
+ }
+ ],
+ "has_more": false,
+ "next_page": null
+ }
+ /organization/invites:
+ get:
+ summary: Returns a list of invites in the organization.
+ operationId: list-invites
+ tags:
+ - Invites
+ parameters:
+ - name: limit
+ in: query
+ description: >
+ A limit on the number of objects to be returned. Limit can range
+ between 1 and 100, and the default is 20.
+ required: false
+ schema:
+ type: integer
+ default: 20
+ - name: after
+ in: query
+ description: >
+ A cursor for use in pagination. `after` is an object ID that defines
+ your place in the list. For instance, if you make a list request and
+ receive 100 objects, ending with obj_foo, your subsequent call can
+ include after=obj_foo in order to fetch the next page of the list.
+ required: false
+ schema:
+ type: string
+ responses:
+ "200":
+ description: Invites listed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/InviteListResponse"
+ x-oaiMeta:
+ name: List invites
+ group: administration
+ returns: A list of [Invite](/docs/api-reference/invite/object) objects.
+ examples:
+ request:
+ curl: >
+ curl
+ https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20
+ \
+ -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+ -H "Content-Type: application/json"
+ response:
+ content: |
+ {
+ "object": "list",
+ "data": [
+ {
+ "object": "organization.invite",
+ "id": "invite-abc",
+ "email": "user@example.com",
+ "role": "owner",
+ "status": "accepted",
+ "invited_at": 1711471533,
+ "expires_at": 1711471533,
+ "accepted_at": 1711471533
+ }
+ ],
+ "first_id": "invite-abc",
+ "last_id": "invite-abc",
+ "has_more": false
+ }
+ post:
+ summary: Create an invite for a user to the organization. The invite must be
+ accepted by the user before they have access to the organization.
+ operationId: inviteUser
+ tags:
+ - Invites
+ requestBody:
+ description: The invite request payload.
+ required: true + content: + application/json: + schema: + $ref: "#/components/schemas/InviteRequest" + responses: + "200": + description: User invited successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Invite" + x-oaiMeta: + name: Create invite + group: administration + returns: The created [Invite](/docs/api-reference/invite/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/invites \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "email": "user@example.com", + "role": "owner" + }' + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": null + } + /organization/invites/{invite_id}: + get: + summary: Retrieves an invite. + operationId: retrieve-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to retrieve. + responses: + "200": + description: Invite retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Invite" + x-oaiMeta: + name: Retrieve invite + group: administration + returns: The [Invite](/docs/api-reference/invite/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + delete: + summary: Delete an invite. If the invite has already been accepted, it cannot be + deleted. + operationId: delete-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to delete. + responses: + "200": + description: Invite deleted successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/InviteDeleteResponse" + x-oaiMeta: + name: Delete invite + group: administration + returns: Confirmation that the invite has been deleted + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite.deleted", + "id": "invite-abc", + "deleted": true + } + /organization/projects: + get: + summary: Returns a list of projects. + operationId: list-projects + tags: + - Projects + parameters: + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. 
+ required: false + schema: + type: string + - name: include_archived + in: query + schema: + type: boolean + default: false + description: If `true` returns all projects including those that have been + `archived`. Archived projects are not included by default. + responses: + "200": + description: Projects listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectListResponse" + x-oaiMeta: + name: List projects + group: administration + returns: A list of [Project](/docs/api-reference/projects/object) objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + ], + "first_id": "proj-abc", + "last_id": "proj-xyz", + "has_more": false + } + post: + summary: Create a new project in the organization. Projects can be created and + archived, but cannot be deleted. + operationId: create-project + tags: + - Projects + requestBody: + description: The project create request payload. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectCreateRequest" + responses: + "200": + description: Project created successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Project" + x-oaiMeta: + name: Create project + group: administration + returns: The created [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project ABC" + }' + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project ABC", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + /organization/projects/{project_id}: + get: + summary: Retrieves a project. + operationId: retrieve-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Project" + x-oaiMeta: + name: Retrieve project + group: administration + description: Retrieve a project. + returns: The [Project](/docs/api-reference/projects/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + post: + summary: Modifies a project in the organization. + operationId: modify-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + requestBody: + description: The project update request payload. 
+ required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUpdateRequest" + responses: + "200": + description: Project updated successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Project" + "400": + description: Error response when updating the default project. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Modify project + group: administration + returns: The updated [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project DEF" + }' + /organization/projects/{project_id}/api_keys: + get: + summary: Returns a list of API keys in the project. + operationId: list-project-api-keys + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + responses: + "200": + description: Project API keys listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectApiKeyListResponse" + x-oaiMeta: + name: List project API keys + group: administration + returns: A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + ], + "first_id": "key_abc", + "last_id": "key_xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + /organization/projects/{project_id}/api_keys/{key_id}: + get: + summary: Retrieves an API key in the project. + operationId: retrieve-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key retrieved successfully. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/ProjectApiKey" + x-oaiMeta: + name: Retrieve project API key + group: administration + returns: The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object + matching the specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + delete: + summary: Deletes an API key from the project. + operationId: delete-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key deleted successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectApiKeyDeleteResponse" + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Delete project API key + group: administration + returns: Confirmation of the key's deletion or an error if the key belonged to a + service account + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key.deleted", + "id": "key_abc", + "deleted": true + } + error_response: + content: > + { + "code": 400, + "message": "API keys cannot be deleted for service accounts, please delete the service account" + } + /organization/projects/{project_id}/archive: + post: + summary: Archives a project in the organization. Archived projects cannot be + used or updated. + operationId: archive-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project archived successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Project" + x-oaiMeta: + name: Archive project + group: administration + returns: The archived [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc/archive \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project DEF", + "created_at": 1711471533, + "archived_at": 1711471533, + "status": "archived" + } + /organization/projects/{project_id}/rate_limits: + get: + summary: Returns the rate limits per model for a project. + operationId: list-project-rate-limits + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. 
+ required: true + schema: + type: string + - name: limit + in: query + description: | + A limit on the number of objects to be returned. The default is 100. + required: false + schema: + type: integer + default: 100 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, beginning with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + required: false + schema: + type: string + responses: + "200": + description: Project rate limits listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectRateLimitListResponse" + x-oaiMeta: + name: List project rate limits + group: administration + returns: A list of + [ProjectRateLimit](/docs/api-reference/project-rate-limits/object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/rate_limits?after=rl_xxx&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: | + { + "object": "list", + "data": [ + { + "object": "project.rate_limit", + "id": "rl-ada", + "model": "ada", + "max_requests_per_1_minute": 600, + "max_tokens_per_1_minute": 150000, + "max_images_per_1_minute": 10 + } + ], + "first_id": "rl-ada", + "last_id": "rl-ada", + "has_more": false + } + error_response: | + { + "code": 404, + "message": "The project {project_id} was not found" + } + /organization/projects/{project_id}/rate_limits/{rate_limit_id}: + post: + summary: Updates a project rate limit. + operationId: update-project-rate-limits + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: rate_limit_id + in: path + description: The ID of the rate limit. + required: true + schema: + type: string + requestBody: + description: The project rate limit update request payload. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectRateLimitUpdateRequest" + responses: + "200": + description: Project rate limit updated successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectRateLimit" + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Modify project rate limit + group: administration + returns: The updated + [ProjectRateLimit](/docs/api-reference/project-rate-limits/object) + object. 
+      examples:
+        request:
+          curl: >
+            curl -X POST
+            https://api.openai.com/v1/organization/projects/proj_abc/rate_limits/rl_xxx
+            \
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+              -H "Content-Type: application/json" \
+              -d '{
+                  "max_requests_per_1_minute": 500
+              }'
+        response: |
+          {
+            "object": "project.rate_limit",
+            "id": "rl-ada",
+            "model": "ada",
+            "max_requests_per_1_minute": 500,
+            "max_tokens_per_1_minute": 150000,
+            "max_images_per_1_minute": 10
+          }
+        error_response: |
+          {
+            "code": 404,
+            "message": "The project {project_id} was not found"
+          }
+  /organization/projects/{project_id}/service_accounts:
+    get:
+      summary: Returns a list of service accounts in the project.
+      operationId: list-project-service-accounts
+      tags:
+        - Projects
+      parameters:
+        - name: project_id
+          in: path
+          description: The ID of the project.
+          required: true
+          schema:
+            type: string
+        - name: limit
+          in: query
+          description: >
+            A limit on the number of objects to be returned. Limit can range
+            between 1 and 100, and the default is 20.
+          required: false
+          schema:
+            type: integer
+            default: 20
+        - name: after
+          in: query
+          description: >
+            A cursor for use in pagination. `after` is an object ID that defines
+            your place in the list. For instance, if you make a list request and
+            receive 100 objects, ending with obj_foo, your subsequent call can
+            include after=obj_foo in order to fetch the next page of the list.
+          required: false
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Project service accounts listed successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ProjectServiceAccountListResponse"
+        "400":
+          description: Error response when project is archived.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ErrorResponse"
+      x-oaiMeta:
+        name: List project service accounts
+        group: administration
+        returns: A list of
+          [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object)
+          objects.
+        examples:
+          request:
+            curl: >
+              curl
+              https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20
+              \
+                -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+                -H "Content-Type: application/json"
+          response:
+            content: |
+              {
+                "object": "list",
+                "data": [
+                  {
+                    "object": "organization.project.service_account",
+                    "id": "svc_acct_abc",
+                    "name": "Service Account",
+                    "role": "owner",
+                    "created_at": 1711471533
+                  }
+                ],
+                "first_id": "svc_acct_abc",
+                "last_id": "svc_acct_xyz",
+                "has_more": false
+              }
+    post:
+      summary: Creates a new service account in the project. This also returns an
+        unredacted API key for the service account.
+      operationId: create-project-service-account
+      tags:
+        - Projects
+      parameters:
+        - name: project_id
+          in: path
+          description: The ID of the project.
+          required: true
+          schema:
+            type: string
+      requestBody:
+        description: The project service account create request payload.
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/ProjectServiceAccountCreateRequest"
+      responses:
+        "200":
+          description: Project service account created successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ProjectServiceAccountCreateResponse"
+        "400":
+          description: Error response when project is archived.
+ content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Create project service account + group: administration + returns: The created + [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) + object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc/service_accounts + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Production App" + }' + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Production App", + "role": "member", + "created_at": 1711471533, + "api_key": { + "object": "organization.project.service_account.api_key", + "value": "sk-abcdefghijklmnop123", + "name": "Secret Key", + "created_at": 1711471533, + "id": "key_abc" + } + } + /organization/projects/{project_id}/service_accounts/{service_account_id}: + get: + summary: Retrieves a service account in the project. + operationId: retrieve-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectServiceAccount" + x-oaiMeta: + name: Retrieve project service account + group: administration + returns: The + [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) + object matching the specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + delete: + summary: Deletes a service account from the project. + operationId: delete-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account deleted successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectServiceAccountDeleteResponse" + x-oaiMeta: + name: Delete project service account + group: administration + returns: Confirmation of service account being deleted, or an error in case of + an archived project, which has no service accounts + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account.deleted", + "id": "svc_acct_abc", + "deleted": true + } + /organization/projects/{project_id}/users: + get: + summary: Returns a list of users in the project. + operationId: list-project-users + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. 
+ required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + responses: + "200": + description: Project users listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUserListResponse" + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: List project users + group: administration + returns: A list of [ProjectUser](/docs/api-reference/project-users/object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + post: + summary: Adds a user to the project. Users must already be members of the + organization to be added to a project. + operationId: create-project-user + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + tags: + - Projects + requestBody: + description: The project user create request payload. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUserCreateRequest" + responses: + "200": + description: User added to project successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUser" + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Create project user + group: administration + returns: The created [ProjectUser](/docs/api-reference/project-users/object) + object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc/users \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "user_id": "user_abc", + "role": "member" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + /organization/projects/{project_id}/users/{user_id}: + get: + summary: Retrieves a user in the project. + operationId: retrieve-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. 
+          required: true
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Project user retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ProjectUser"
+      x-oaiMeta:
+        name: Retrieve project user
+        group: administration
+        returns: The [ProjectUser](/docs/api-reference/project-users/object) object
+          matching the specified ID.
+        examples:
+          request:
+            curl: >
+              curl
+              https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc
+              \
+                -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+                -H "Content-Type: application/json"
+          response:
+            content: |
+              {
+                "object": "organization.project.user",
+                "id": "user_abc",
+                "name": "First Last",
+                "email": "user@example.com",
+                "role": "owner",
+                "added_at": 1711471533
+              }
+    post:
+      summary: Modifies a user's role in the project.
+      operationId: modify-project-user
+      tags:
+        - Projects
+      parameters:
+        - name: project_id
+          in: path
+          description: The ID of the project.
+          required: true
+          schema:
+            type: string
+        - name: user_id
+          in: path
+          description: The ID of the user.
+          required: true
+          schema:
+            type: string
+      requestBody:
+        description: The project user update request payload.
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/ProjectUserUpdateRequest"
+      responses:
+        "200":
+          description: Project user's role updated successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ProjectUser"
+        "400":
+          description: Error response for various conditions.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ErrorResponse"
+      x-oaiMeta:
+        name: Modify project user
+        group: administration
+        returns: The updated [ProjectUser](/docs/api-reference/project-users/object)
+          object.
+        examples:
+          request:
+            curl: >
+              curl -X POST
+              https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc
+              \
+                -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+                -H "Content-Type: application/json" \
+                -d '{
+                    "role": "owner"
+                }'
+          response:
+            content: |
+              {
+                "object": "organization.project.user",
+                "id": "user_abc",
+                "name": "First Last",
+                "email": "user@example.com",
+                "role": "owner",
+                "added_at": 1711471533
+              }
+    delete:
+      summary: Deletes a user from the project.
+      operationId: delete-project-user
+      tags:
+        - Projects
+      parameters:
+        - name: project_id
+          in: path
+          description: The ID of the project.
+          required: true
+          schema:
+            type: string
+        - name: user_id
+          in: path
+          description: The ID of the user.
+          required: true
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Project user deleted successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ProjectUserDeleteResponse"
+        "400":
+          description: Error response for various conditions.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ErrorResponse"
+      x-oaiMeta:
+        name: Delete project user
+        group: administration
+        returns: Confirmation that the user has been removed from the project, or an
+          error in case of an archived project, which has no users
+        examples:
+          request:
+            curl: >
+              curl -X DELETE
+              https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc
+              \
+                -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+                -H "Content-Type: application/json"
+          response:
+            content: |
+              {
+                "object": "organization.project.user.deleted",
+                "id": "user_abc",
+                "deleted": true
+              }
+  /organization/usage/audio_speeches:
+    get:
+      summary: Get audio speeches usage details for the organization.
+      operationId: usage-audio-speeches
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response. Currently `1m`, `1h` and
+            `1d` are supported, defaults to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1m
+              - 1h
+              - 1d
+            default: 1d
+        - name: project_ids
+          in: query
+          description: Return only usage for these projects.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: user_ids
+          in: query
+          description: Return only usage for these users.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: api_key_ids
+          in: query
+          description: Return only usage for these API keys.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: models
+          in: query
+          description: Return only usage for these models.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: group_by
+          in: query
+          description: Group the usage data by the specified fields. Supported fields
+            include `project_id`, `user_id`, `api_key_id`, `model` or any
+            combination of them.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - project_id
+                - user_id
+                - api_key_id
+                - model
+        - name: limit
+          in: query
+          description: |
+            Specifies the number of buckets to return.
+            - `bucket_width=1d`: default: 7, max: 31
+            - `bucket_width=1h`: default: 24, max: 168
+            - `bucket_width=1m`: default: 60, max: 1440
+          required: false
+          schema:
+            type: integer
+        - name: page
+          in: query
+          description: A cursor for use in pagination. Corresponds to the `next_page`
+            field from the previous response.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Usage data retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UsageResponse"
+      x-oaiMeta:
+        name: Audio speeches
+        group: usage-audio-speeches
+        returns: A list of paginated, time-bucketed [Audio speeches
+          usage](/docs/api-reference/usage/audio_speeches_object) objects.
+        examples:
+          request:
+            curl: >
+              curl
+              "https://api.openai.com/v1/organization/usage/audio_speeches?start_time=1730419200&limit=1"
+              \
+
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+              -H "Content-Type: application/json"
+          response: >
+            {
+              "object": "page",
+              "data": [
+                {
+                  "object": "bucket",
+                  "start_time": 1730419200,
+                  "end_time": 1730505600,
+                  "results": [
+                    {
+                      "object": "organization.usage.audio_speeches.result",
+                      "characters": 45,
+                      "num_model_requests": 1,
+                      "project_id": null,
+                      "user_id": null,
+                      "api_key_id": null,
+                      "model": null
+                    }
+                  ]
+                }
+              ],
+              "has_more": false,
+              "next_page": null
+            }
+  /organization/usage/audio_transcriptions:
+    get:
+      summary: Get audio transcriptions usage details for the organization.
+      operationId: usage-audio-transcriptions
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response.
Currently `1m`, `1h` and
+            `1d` are supported, defaults to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1m
+              - 1h
+              - 1d
+            default: 1d
+        - name: project_ids
+          in: query
+          description: Return only usage for these projects.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: user_ids
+          in: query
+          description: Return only usage for these users.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: api_key_ids
+          in: query
+          description: Return only usage for these API keys.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: models
+          in: query
+          description: Return only usage for these models.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: group_by
+          in: query
+          description: Group the usage data by the specified fields. Supported fields
+            include `project_id`, `user_id`, `api_key_id`, `model` or any
+            combination of them.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - project_id
+                - user_id
+                - api_key_id
+                - model
+        - name: limit
+          in: query
+          description: |
+            Specifies the number of buckets to return.
+            - `bucket_width=1d`: default: 7, max: 31
+            - `bucket_width=1h`: default: 24, max: 168
+            - `bucket_width=1m`: default: 60, max: 1440
+          required: false
+          schema:
+            type: integer
+        - name: page
+          in: query
+          description: A cursor for use in pagination. Corresponds to the `next_page`
+            field from the previous response.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Usage data retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UsageResponse"
+      x-oaiMeta:
+        name: Audio transcriptions
+        group: usage-audio-transcriptions
+        returns: A list of paginated, time-bucketed [Audio transcriptions
+          usage](/docs/api-reference/usage/audio_transcriptions_object) objects.
+        examples:
+          request:
+            curl: >
+              curl
+              "https://api.openai.com/v1/organization/usage/audio_transcriptions?start_time=1730419200&limit=1"
+              \
+
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+              -H "Content-Type: application/json"
+          response: >
+            {
+              "object": "page",
+              "data": [
+                {
+                  "object": "bucket",
+                  "start_time": 1730419200,
+                  "end_time": 1730505600,
+                  "results": [
+                    {
+                      "object": "organization.usage.audio_transcriptions.result",
+                      "seconds": 20,
+                      "num_model_requests": 1,
+                      "project_id": null,
+                      "user_id": null,
+                      "api_key_id": null,
+                      "model": null
+                    }
+                  ]
+                }
+              ],
+              "has_more": false,
+              "next_page": null
+            }
+  /organization/usage/code_interpreter_sessions:
+    get:
+      summary: Get code interpreter sessions usage details for the organization.
+      operationId: usage-code-interpreter-sessions
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response. Currently `1m`, `1h` and
+            `1d` are supported, defaults to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1m
+              - 1h
+              - 1d
+            default: 1d
+        - name: project_ids
+          in: query
+          description: Return only usage for these projects.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: group_by
+          in: query
+          description: Group the usage data by the specified fields. Supported fields
+            include `project_id`.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - project_id
+        - name: limit
+          in: query
+          description: |
+            Specifies the number of buckets to return.
+            - `bucket_width=1d`: default: 7, max: 31
+            - `bucket_width=1h`: default: 24, max: 168
+            - `bucket_width=1m`: default: 60, max: 1440
+          required: false
+          schema:
+            type: integer
+        - name: page
+          in: query
+          description: A cursor for use in pagination. Corresponds to the `next_page`
+            field from the previous response.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Usage data retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UsageResponse"
+      x-oaiMeta:
+        name: Code interpreter sessions
+        group: usage-code-interpreter-sessions
+        returns: A list of paginated, time-bucketed [Code interpreter sessions
+          usage](/docs/api-reference/usage/code_interpreter_sessions_object)
+          objects.
+        examples:
+          request:
+            curl: >
+              curl
+              "https://api.openai.com/v1/organization/usage/code_interpreter_sessions?start_time=1730419200&limit=1"
+              \
+
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+              -H "Content-Type: application/json"
+          response: >
+            {
+              "object": "page",
+              "data": [
+                {
+                  "object": "bucket",
+                  "start_time": 1730419200,
+                  "end_time": 1730505600,
+                  "results": [
+                    {
+                      "object": "organization.usage.code_interpreter_sessions.result",
+                      "sessions": 1,
+                      "project_id": null
+                    }
+                  ]
+                }
+              ],
+              "has_more": false,
+              "next_page": null
+            }
+  /organization/usage/completions:
+    get:
+      summary: Get completions usage details for the organization.
+      operationId: usage-completions
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response. Currently `1m`, `1h` and
+            `1d` are supported, defaults to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1m
+              - 1h
+              - 1d
+            default: 1d
+        - name: project_ids
+          in: query
+          description: Return only usage for these projects.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: user_ids
+          in: query
+          description: Return only usage for these users.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: api_key_ids
+          in: query
+          description: Return only usage for these API keys.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: models
+          in: query
+          description: Return only usage for these models.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: batch
+          in: query
+          description: >
+            If `true`, return batch jobs only. If `false`, return non-batch jobs
+            only. By default, return both.
+          required: false
+          schema:
+            type: boolean
+        - name: group_by
+          in: query
+          description: Group the usage data by the specified fields. Supported fields
+            include `project_id`, `user_id`, `api_key_id`, `model`, `batch` or
+            any combination of them.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - project_id
+                - user_id
+                - api_key_id
+                - model
+                - batch
+        - name: limit
+          in: query
+          description: |
+            Specifies the number of buckets to return.
+            - `bucket_width=1d`: default: 7, max: 31
+            - `bucket_width=1h`: default: 24, max: 168
+            - `bucket_width=1m`: default: 60, max: 1440
+          required: false
+          schema:
+            type: integer
+        - name: page
+          in: query
+          description: A cursor for use in pagination. Corresponds to the `next_page`
+            field from the previous response.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Usage data retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UsageResponse"
+      x-oaiMeta:
+        name: Completions
+        group: usage-completions
+        returns: A list of paginated, time-bucketed [Completions
+          usage](/docs/api-reference/usage/completions_object) objects.
+        examples:
+          request:
+            curl: >
+              curl
+              "https://api.openai.com/v1/organization/usage/completions?start_time=1730419200&limit=1"
+              \
+
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+              -H "Content-Type: application/json"
+          response: >
+            {
+              "object": "page",
+              "data": [
+                {
+                  "object": "bucket",
+                  "start_time": 1730419200,
+                  "end_time": 1730505600,
+                  "results": [
+                    {
+                      "object": "organization.usage.completions.result",
+                      "input_tokens": 1000,
+                      "output_tokens": 500,
+                      "input_cached_tokens": 800,
+                      "num_model_requests": 5,
+                      "project_id": null,
+                      "user_id": null,
+                      "api_key_id": null,
+                      "model": null,
+                      "batch": null
+                    }
+                  ]
+                }
+              ],
+              "has_more": true,
+              "next_page": "AAAAAGdGxdEiJdKOAAAAAGcqsYA="
+            }
+  /organization/usage/embeddings:
+    get:
+      summary: Get embeddings usage details for the organization.
+      operationId: usage-embeddings
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response. Currently `1m`, `1h` and
+            `1d` are supported, defaults to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1m
+              - 1h
+              - 1d
+            default: 1d
+        - name: project_ids
+          in: query
+          description: Return only usage for these projects.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: user_ids
+          in: query
+          description: Return only usage for these users.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: api_key_ids
+          in: query
+          description: Return only usage for these API keys.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: models
+          in: query
+          description: Return only usage for these models.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: group_by
+          in: query
+          description: Group the usage data by the specified fields. Supported fields
+            include `project_id`, `user_id`, `api_key_id`, `model` or any
+            combination of them.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - project_id
+                - user_id
+                - api_key_id
+                - model
+        - name: limit
+          in: query
+          description: |
+            Specifies the number of buckets to return.
+            - `bucket_width=1d`: default: 7, max: 31
+            - `bucket_width=1h`: default: 24, max: 168
+            - `bucket_width=1m`: default: 60, max: 1440
+          required: false
+          schema:
+            type: integer
+        - name: page
+          in: query
+          description: A cursor for use in pagination. Corresponds to the `next_page`
+            field from the previous response.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Usage data retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UsageResponse"
+      x-oaiMeta:
+        name: Embeddings
+        group: usage-embeddings
+        returns: A list of paginated, time-bucketed [Embeddings
+          usage](/docs/api-reference/usage/embeddings_object) objects.
+        examples:
+          request:
+            curl: >
+              curl
+              "https://api.openai.com/v1/organization/usage/embeddings?start_time=1730419200&limit=1"
+              \
+
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+              -H "Content-Type: application/json"
+          response: >
+            {
+              "object": "page",
+              "data": [
+                {
+                  "object": "bucket",
+                  "start_time": 1730419200,
+                  "end_time": 1730505600,
+                  "results": [
+                    {
+                      "object": "organization.usage.embeddings.result",
+                      "input_tokens": 16,
+                      "num_model_requests": 2,
+                      "project_id": null,
+                      "user_id": null,
+                      "api_key_id": null,
+                      "model": null
+                    }
+                  ]
+                }
+              ],
+              "has_more": false,
+              "next_page": null
+            }
+  /organization/usage/images:
+    get:
+      summary: Get images usage details for the organization.
+      operationId: usage-images
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response. Currently `1m`, `1h` and
+            `1d` are supported, defaults to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1m
+              - 1h
+              - 1d
+            default: 1d
+        - name: sources
+          in: query
+          description: Return only usage for these sources. Possible values are
+            `image.generation`, `image.edit`, `image.variation` or any
+            combination of them.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - image.generation
+                - image.edit
+                - image.variation
+        - name: sizes
+          in: query
+          description: Return only usage for these image sizes. Possible values are
+            `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any
+            combination of them.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - 256x256
+                - 512x512
+                - 1024x1024
+                - 1792x1792
+                - 1024x1792
+        - name: project_ids
+          in: query
+          description: Return only usage for these projects.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: user_ids
+          in: query
+          description: Return only usage for these users.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: api_key_ids
+          in: query
+          description: Return only usage for these API keys.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: models
+          in: query
+          description: Return only usage for these models.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: group_by
+          in: query
+          description: Group the usage data by the specified fields. Supported fields
+            include `project_id`, `user_id`, `api_key_id`, `model`, `size`,
+            `source` or any combination of them.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - project_id
+                - user_id
+                - api_key_id
+                - model
+                - size
+                - source
+        - name: limit
+          in: query
+          description: |
+            Specifies the number of buckets to return.
+            - `bucket_width=1d`: default: 7, max: 31
+            - `bucket_width=1h`: default: 24, max: 168
+            - `bucket_width=1m`: default: 60, max: 1440
+          required: false
+          schema:
+            type: integer
+        - name: page
+          in: query
+          description: A cursor for use in pagination.
Corresponds to the `next_page`
+            field from the previous response.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Usage data retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UsageResponse"
+      x-oaiMeta:
+        name: Images
+        group: usage-images
+        returns: A list of paginated, time-bucketed [Images
+          usage](/docs/api-reference/usage/images_object) objects.
+        examples:
+          request:
+            curl: >
+              curl
+              "https://api.openai.com/v1/organization/usage/images?start_time=1730419200&limit=1"
+              \
+
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+              -H "Content-Type: application/json"
+          response: |
+            {
+              "object": "page",
+              "data": [
+                {
+                  "object": "bucket",
+                  "start_time": 1730419200,
+                  "end_time": 1730505600,
+                  "results": [
+                    {
+                      "object": "organization.usage.images.result",
+                      "images": 2,
+                      "num_model_requests": 2,
+                      "size": null,
+                      "source": null,
+                      "project_id": null,
+                      "user_id": null,
+                      "api_key_id": null,
+                      "model": null
+                    }
+                  ]
+                }
+              ],
+              "has_more": false,
+              "next_page": null
+            }
+  /organization/usage/moderations:
+    get:
+      summary: Get moderations usage details for the organization.
+      operationId: usage-moderations
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response. Currently `1m`, `1h` and
+            `1d` are supported, defaults to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1m
+              - 1h
+              - 1d
+            default: 1d
+        - name: project_ids
+          in: query
+          description: Return only usage for these projects.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: user_ids
+          in: query
+          description: Return only usage for these users.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: api_key_ids
+          in: query
+          description: Return only usage for these API keys.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: models
+          in: query
+          description: Return only usage for these models.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: group_by
+          in: query
+          description: Group the usage data by the specified fields. Supported fields
+            include `project_id`, `user_id`, `api_key_id`, `model` or any
+            combination of them.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - project_id
+                - user_id
+                - api_key_id
+                - model
+        - name: limit
+          in: query
+          description: |
+            Specifies the number of buckets to return.
+            - `bucket_width=1d`: default: 7, max: 31
+            - `bucket_width=1h`: default: 24, max: 168
+            - `bucket_width=1m`: default: 60, max: 1440
+          required: false
+          schema:
+            type: integer
+        - name: page
+          in: query
+          description: A cursor for use in pagination. Corresponds to the `next_page`
+            field from the previous response.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Usage data retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UsageResponse"
+      x-oaiMeta:
+        name: Moderations
+        group: usage-moderations
+        returns: A list of paginated, time-bucketed [Moderations
+          usage](/docs/api-reference/usage/moderations_object) objects.
+        examples:
+          request:
+            curl: >
+              curl
+              "https://api.openai.com/v1/organization/usage/moderations?start_time=1730419200&limit=1"
+              \
+
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+              -H "Content-Type: application/json"
+          response: >
+            {
+              "object": "page",
+              "data": [
+                {
+                  "object": "bucket",
+                  "start_time": 1730419200,
+                  "end_time": 1730505600,
+                  "results": [
+                    {
+                      "object": "organization.usage.moderations.result",
+                      "input_tokens": 16,
+                      "num_model_requests": 2,
+                      "project_id": null,
+                      "user_id": null,
+                      "api_key_id": null,
+                      "model": null
+                    }
+                  ]
+                }
+              ],
+              "has_more": false,
+              "next_page": null
+            }
+  /organization/usage/vector_stores:
+    get:
+      summary: Get vector stores usage details for the organization.
+      operationId: usage-vector-stores
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response. Currently `1m`, `1h` and
+            `1d` are supported, defaults to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1m
+              - 1h
+              - 1d
+            default: 1d
+        - name: project_ids
+          in: query
+          description: Return only usage for these projects.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: group_by
+          in: query
+          description: Group the usage data by the specified fields. Supported fields
+            include `project_id`.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+              enum:
+                - project_id
+        - name: limit
+          in: query
+          description: |
+            Specifies the number of buckets to return.
+            - `bucket_width=1d`: default: 7, max: 31
+            - `bucket_width=1h`: default: 24, max: 168
+            - `bucket_width=1m`: default: 60, max: 1440
+          required: false
+          schema:
+            type: integer
+        - name: page
+          in: query
+          description: A cursor for use in pagination. Corresponds to the `next_page`
+            field from the previous response.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Usage data retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UsageResponse"
+      x-oaiMeta:
+        name: Vector stores
+        group: usage-vector-stores
+        returns: A list of paginated, time-bucketed [Vector stores
+          usage](/docs/api-reference/usage/vector_stores_object) objects.
+        examples:
+          request:
+            curl: >
+              curl
+              "https://api.openai.com/v1/organization/usage/vector_stores?start_time=1730419200&limit=1"
+              \
+
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+              -H "Content-Type: application/json"
+          response: >
+            {
+              "object": "page",
+              "data": [
+                {
+                  "object": "bucket",
+                  "start_time": 1730419200,
+                  "end_time": 1730505600,
+                  "results": [
+                    {
+                      "object": "organization.usage.vector_stores.result",
+                      "usage_bytes": 1024,
+                      "project_id": null
+                    }
+                  ]
+                }
+              ],
+              "has_more": false,
+              "next_page": null
+            }
+  /organization/users:
+    get:
+      summary: Lists all of the users in the organization.
+      operationId: list-users
+      tags:
+        - Users
+      parameters:
+        - name: limit
+          in: query
+          description: >
+            A limit on the number of objects to be returned. Limit can range
+            between 1 and 100, and the default is 20.
+          required: false
+          schema:
+            type: integer
+            default: 20
+        - name: after
+          in: query
+          description: >
+            A cursor for use in pagination. `after` is an object ID that defines
+            your place in the list.
For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + responses: + "200": + description: Users listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UserListResponse" + x-oaiMeta: + name: List users + group: administration + returns: A list of [User](/docs/api-reference/users/object) objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/users?after=user_abc&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + /organization/users/{user_id}: + get: + summary: Retrieves a user by their identifier. + operationId: retrieve-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/User" + x-oaiMeta: + name: Retrieve user + group: administration + returns: The [User](/docs/api-reference/users/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + post: + summary: Modifies a user's role in the organization. + operationId: modify-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + requestBody: + description: The new user role to modify. This must be one of `owner` or `member`. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UserRoleUpdateRequest" + responses: + "200": + description: User role updated successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/User" + x-oaiMeta: + name: Modify user + group: administration + returns: The updated [User](/docs/api-reference/users/object) object. + examples: + request: + curl: > + curl -X POST https://api.openai.com/v1/organization/users/user_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + delete: + summary: Deletes a user from the organization. + operationId: delete-user + tags: + - Users + parameters: + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: User deleted successfully. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/UserDeleteResponse" + x-oaiMeta: + name: Delete user + group: administration + returns: Confirmation of the deleted user + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user.deleted", + "id": "user_abc", + "deleted": true + } + /threads: + post: + operationId: createThread + tags: + - Assistants + summary: Create a thread. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CreateThreadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Create thread + group: threads + beta: true + returns: A [thread](/docs/api-reference/threads) object. + examples: + - title: Empty + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '' + python: | + from openai import OpenAI + client = OpenAI() + + empty_thread = client.beta.threads.create() + print(empty_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const emptyThread = await openai.beta.threads.create(); + + console.log(emptyThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699012949, + "metadata": {}, + "tool_resources": {} + } + - title: Messages + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "messages": [{ + "role": "user", + "content": "Hello, what is AI?" + }, { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }] + }' + python: | + from openai import OpenAI + client = OpenAI() + + message_thread = client.beta.threads.create( + messages=[ + { + "role": "user", + "content": "Hello, what is AI?" + }, + { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }, + ] + ) + + print(message_thread) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const messageThread = await openai.beta.threads.create({ + messages: [ + { + role: "user", + content: "Hello, what is AI?" + }, + { + role: "user", + content: "How does AI work? Explain it in simple terms.", + }, + ], + }); + + console.log(messageThread); + } + + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": {}, + "tool_resources": {} + } + /threads/runs: + post: + operationId: createThreadAndRun + tags: + - Assistants + summary: Create a thread and run it in one request. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateThreadAndRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Create thread and run + group: threads + beta: true + returns: A [run](/docs/api-reference/runs/object) object. 
+ examples: + - title: Default + request: + curl: > + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123", + "thread": { + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + run = client.beta.threads.create_and_run( + assistant_id="asst_abc123", + thread={ + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + ) + + + print(run) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const run = await openai.beta.threads.createAndRun({ + assistant_id: "asst_abc123", + thread: { + messages: [ + { role: "user", content: "Explain deep learning to a 5 year old." }, + ], + }, + }); + + console.log(run); + } + + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076792, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": null, + "expires_at": 1699077392, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "required_action": null, + "last_error": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant.", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "temperature": 1.0, + "top_p": 1.0, + "max_completion_tokens": null, + "max_prompt_tokens": null, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "incomplete_details": null, + "usage": null, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_123", + "thread": { + "messages": [ + {"role": "user", "content": "Hello"} + ] + }, + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.create_and_run( + assistant_id="asst_123", + thread={ + "messages": [ + {"role": "user", "content": "Hello"} + ] + }, + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.beta.threads.createAndRun({ + assistant_id: "asst_123", + thread: { + messages: [ + { role: "user", content: "Hello" }, + ], + }, + stream: true + }); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: > + event: thread.created + + data: + {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} + + + event: thread.run.created + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + + + 
event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + + + event: thread.run.step.created + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.run.step.in_progress + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.message.created + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], + "metadata":{}} + + + event: thread.message.in_progress + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], + "metadata":{}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + + ... 
+
+
+ event: thread.message.delta
+
+ data:
+ {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"
+ today"}}]}}
+
+
+ event: thread.message.delta
+
+ data:
+ {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}}
+
+
+ event: thread.message.completed
+
+ data:
+ {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello!
+ How can I assist you today?","annotations":[]}}], "metadata":{}}
+
+
+ event: thread.run.step.completed
+
+ data:
+ {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}}
+
+
+ event: thread.run.completed
+
+ data:
+ {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}
+
+
+ event: done
+
+ data: [DONE]
+ - title: Streaming with Functions
+ request:
+ curl: >
+ curl https://api.openai.com/v1/threads/runs \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -H "OpenAI-Beta: assistants=v2" \
+ -d '{
+ "assistant_id": "asst_abc123",
+ "thread": {
+ "messages": [
+ {"role": "user", "content": "What is the weather like in San Francisco?"}
+ ]
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA"
+ },
+ "unit": {
+ "type": "string",
+ "enum": ["celsius", "fahrenheit"]
+ }
+ },
+ "required": ["location"]
+ }
+ }
+ }
+ ],
+ "stream": true
+ }'
+ python: >
+ from openai import OpenAI
+
+ client = OpenAI()
+
+
+ tools = [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + + stream = client.beta.threads.create_and_run( + thread={ + "messages": [ + {"role": "user", "content": "What is the weather like in San Francisco?"} + ] + }, + assistant_id="asst_abc123", + tools=tools, + stream=True + ) + + + for event in stream: + print(event) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + + async function main() { + const stream = await openai.beta.threads.createAndRun({ + assistant_id: "asst_123", + thread: { + messages: [ + { role: "user", content: "What is the weather like in San Francisco?" }, + ], + }, + tools: tools, + stream: true + }); + + for await (const event of stream) { + console.log(event); + } + } + + + main(); + response: > + event: thread.created + + data: + {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} + + + event: thread.run.created + + data: + {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. 
San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.step.created + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + + + event: thread.run.step.in_progress + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} + + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} + + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} + + + ... 
+ + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} + + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} + + + event: thread.run.requires_action + + data: + {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San + Francisco, + CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: done + + data: [DONE] + /threads/{thread_id}: + get: + operationId: getThread + tags: + - Assistants + summary: Retrieves a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Retrieve thread + group: threads + beta: true + returns: The [thread](/docs/api-reference/threads/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_thread = client.beta.threads.retrieve("thread_abc123") + print(my_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myThread = await openai.beta.threads.retrieve( + "thread_abc123" + ); + + console.log(myThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": {}, + "tool_resources": { + "code_interpreter": { + "file_ids": [] + } + } + } + post: + operationId: modifyThread + tags: + - Assistants + summary: Modifies a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to modify. Only the `metadata` can be modified. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyThreadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Modify thread + group: threads + beta: true + returns: The modified [thread](/docs/api-reference/threads/object) object + matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "metadata": { + "modified": "true", + "user": "abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + my_updated_thread = client.beta.threads.update( + "thread_abc123", + metadata={ + "modified": "true", + "user": "abc123" + } + ) + print(my_updated_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const updatedThread = await openai.beta.threads.update( + "thread_abc123", + { + metadata: { modified: "true", user: "abc123" }, + } + ); + + console.log(updatedThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": { + "modified": "true", + "user": "abc123" + }, + "tool_resources": {} + } + delete: + operationId: deleteThread + tags: + - Assistants + summary: Delete a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteThreadResponse" + x-oaiMeta: + name: Delete thread + group: threads + beta: true + returns: Deletion status + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X DELETE + python: | + from openai import OpenAI + client = OpenAI() + + response = client.beta.threads.delete("thread_abc123") + print(response) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.beta.threads.del("thread_abc123"); + + console.log(response); + } + main(); + response: | + { + "id": "thread_abc123", + "object": "thread.deleted", + "deleted": true + } + /threads/{thread_id}/messages: + get: + operationId: listMessages + tags: + - Assistants + summary: Returns a list of messages for a given thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) the messages + belong to. + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. 
For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + - name: run_id + in: query + description: | + Filter messages by the run ID that generated them. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListMessagesResponse" + x-oaiMeta: + name: List messages + group: threads + beta: true + returns: A list of [message](/docs/api-reference/messages) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: > + from openai import OpenAI + + client = OpenAI() + + + thread_messages = + client.beta.threads.messages.list("thread_abc123") + + print(thread_messages.data) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const threadMessages = await openai.beta.threads.messages.list( + "thread_abc123" + ); + + console.log(threadMessages.data); + } + + main(); + response: > + { + "object": "list", + "data": [ + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699016383, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + }, + { + "id": "msg_abc456", + "object": "thread.message", + "created_at": 1699016383, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "Hello, what is AI?", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + } + ], + "first_id": "msg_abc123", + "last_id": "msg_abc456", + "has_more": false + } + post: + operationId: createMessage + tags: + - Assistants + summary: Create a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to create a + message for. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Create message + group: threads + beta: true + returns: A [message](/docs/api-reference/messages/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "role": "user", + "content": "How does AI work? Explain it in simple terms." 
+ }' + python: | + from openai import OpenAI + client = OpenAI() + + thread_message = client.beta.threads.messages.create( + "thread_abc123", + role="user", + content="How does AI work? Explain it in simple terms.", + ) + print(thread_message) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const threadMessages = await openai.beta.threads.messages.create( + "thread_abc123", + { role: "user", content: "How does AI work? Explain it in simple terms." } + ); + + console.log(threadMessages); + } + + + main(); + response: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1713226573, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + } + /threads/{thread_id}/messages/{message_id}: + get: + operationId: getMessage + tags: + - Assistants + summary: Retrieve a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to which this + message belongs. + - in: path + name: message_id + required: true + schema: + type: string + description: The ID of the message to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Retrieve message + group: threads + beta: true + returns: The [message](/docs/api-reference/messages/object) object matching the + specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 + \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + message = client.beta.threads.messages.retrieve( + message_id="msg_abc123", + thread_id="thread_abc123", + ) + print(message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const message = await openai.beta.threads.messages.retrieve( + "thread_abc123", + "msg_abc123" + ); + + console.log(message); + } + + main(); + response: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699017614, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + } + post: + operationId: modifyMessage + tags: + - Assistants + summary: Modifies a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which this message belongs. + - in: path + name: message_id + required: true + schema: + type: string + description: The ID of the message to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Modify message + group: threads + beta: true + returns: The modified [message](/docs/api-reference/messages/object) object. 
+ examples:
+ request:
+ curl: >
+ curl
+ https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123
+ \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "OpenAI-Beta: assistants=v2" \
+ -d '{
+ "metadata": {
+ "modified": "true",
+ "user": "abc123"
+ }
+ }'
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ message = client.beta.threads.messages.update(
+ message_id="msg_abc123",
+ thread_id="thread_abc123",
+ metadata={
+ "modified": "true",
+ "user": "abc123",
+ },
+ )
+ print(message)
+ node.js: |-
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const message = await openai.beta.threads.messages.update(
+ "thread_abc123",
+ "msg_abc123",
+ {
+ metadata: {
+ modified: "true",
+ user: "abc123",
+ },
+ }
+ );
+
+ console.log(message);
+ }
+
+ main();
+ response: |
+ {
+ "id": "msg_abc123",
+ "object": "thread.message",
+ "created_at": 1699017614,
+ "assistant_id": null,
+ "thread_id": "thread_abc123",
+ "run_id": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": {
+ "value": "How does AI work? Explain it in simple terms.",
+ "annotations": []
+ }
+ }
+ ],
+ "attachments": [],
+ "metadata": {
+ "modified": "true",
+ "user": "abc123"
+ }
+ }
+ delete:
+ operationId: deleteMessage
+ tags:
+ - Assistants
+ summary: Deletes a message.
+ parameters:
+ - in: path
+ name: thread_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the thread to which this message belongs.
+ - in: path
+ name: message_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the message to delete.
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/DeleteMessageResponse"
+ x-oaiMeta:
+ name: Delete message
+ group: threads
+ beta: true
+ returns: Deletion status
+ examples:
+ request:
+ curl: >
+ curl -X DELETE
+ https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123
+ \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "OpenAI-Beta: assistants=v2"
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ deleted_message = client.beta.threads.messages.delete(
+ message_id="msg_abc123",
+ thread_id="thread_abc123",
+ )
+ print(deleted_message)
+ node.js: |-
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const deletedMessage = await openai.beta.threads.messages.del(
+ "thread_abc123",
+ "msg_abc123"
+ );
+
+ console.log(deletedMessage);
+ }
+
+ main();
+ response: |
+ {
+ "id": "msg_abc123",
+ "object": "thread.message.deleted",
+ "deleted": true
+ }
+ /threads/{thread_id}/runs:
+ get:
+ operationId: listRuns
+ tags:
+ - Assistants
+ summary: Returns a list of runs belonging to a thread.
+ parameters:
+ - name: thread_id
+ in: path
+ required: true
+ schema:
+ type: string
+ description: The ID of the thread the run belongs to.
+ - name: limit
+ in: query
+ description: >
+ A limit on the number of objects to be returned. Limit can range
+ between 1 and 100, and the default is 20.
+ required: false
+ schema:
+ type: integer
+ default: 20
+ - name: order
+ in: query
+ description: >
+ Sort order by the `created_at` timestamp of the objects. `asc` for
+ ascending order and `desc` for descending order.
+ schema:
+ type: string
+ default: desc
+ enum:
+ - asc
+ - desc
+ - name: after
+ in: query
+ description: >
+ A cursor for use in pagination. `after` is an object ID that defines
+ your place in the list. 
For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunsResponse" + x-oaiMeta: + name: List runs + group: threads + beta: true + returns: A list of [run](/docs/api-reference/runs/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + runs = client.beta.threads.runs.list( + "thread_abc123" + ) + + print(runs) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const runs = await openai.beta.threads.runs.list( + "thread_abc123" + ); + + console.log(runs); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + } + }, + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + }, + { + "id": "run_abc456", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + } + }, + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + ], + "first_id": "run_abc123", + "last_id": "run_abc456", + "has_more": false + } + post: + operationId: createRun + tags: + - Assistants + summary: Create a run. 
+ parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to run. + - name: include[] + in: query + description: > + A list of additional fields to include in the response. Currently + the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch + the file search result content. + + + See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + schema: + type: array + items: + type: string + enum: + - step_details.tool_calls[*].file_search.results[*].content + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Create run + group: threads + beta: true + returns: A [run](/docs/api-reference/runs/object) object. + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.create( + "thread_abc123", + { assistant_id: "asst_abc123" } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/thread_123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_123", + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.runs.create( + thread_id="thread_123", + assistant_id="asst_123", + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.beta.threads.runs.create( + "thread_123", + { assistant_id: "asst_123", stream: true } + ); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: > + event: thread.run.created + + data: + 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.step.created + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.run.step.in_progress + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.message.created + + data: + {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.in_progress + + data: + {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.delta + + data: + 
{"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + + ... + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + today"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + + event: thread.message.completed + + data: + {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! + How can I assist you today?","annotations":[]}}],"metadata":{}} + + + event: thread.run.step.completed + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + + event: thread.run.completed + + data: + {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: done + + data: [DONE] + - title: Streaming with Functions + request: + curl: > + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123", + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "stream": true + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + + stream = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123", + tools=tools, + stream=True + ) + + + for event in stream: + print(event) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + + async function main() { + const stream = await openai.beta.threads.runs.create( + "thread_abc123", + { + assistant_id: "asst_abc123", + tools: tools, + stream: true + } + ); + + for await (const event of stream) { + console.log(event); + } + } + + + main(); + response: > + event: thread.run.created + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.step.created + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.run.step.in_progress + + data: + 
{"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.message.created + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.in_progress + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + + ... + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + today"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + + event: thread.message.completed + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! + How can I assist you today?","annotations":[]}}],"metadata":{}} + + + event: thread.run.step.completed + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + + event: thread.run.completed + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: done + + data: [DONE] + /threads/{thread_id}/runs/{run_id}: + get: + operationId: getRun + tags: + - Assistants + summary: Retrieves a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. 
+ - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Retrieve run + group: threads + beta: true + returns: The [run](/docs/api-reference/runs/object) object matching the + specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.retrieve( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.retrieve( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + post: + operationId: modifyRun + tags: + - Assistants + summary: Modifies a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Modify run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the + specified ID. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "metadata": { + "user_id": "user_abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.update( + thread_id="thread_abc123", + run_id="run_abc123", + metadata={"user_id": "user_abc123"}, + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.update( + "thread_abc123", + "run_abc123", + { + metadata: { + user_id: "user_abc123", + }, + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + } + }, + "metadata": { + "user_id": "user_abc123" + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + /threads/{thread_id}/runs/{run_id}/cancel: + post: + operationId: cancelRun + tags: + - Assistants + summary: Cancels a run that is `in_progress`. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which this run belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to cancel. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Cancel a run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the + specified ID. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X POST + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.cancel( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.cancel( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076126, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "cancelling", + "started_at": 1699076126, + "expires_at": 1699076726, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4o", + "instructions": "You summarize books.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": ["vs_123"] + } + }, + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + /threads/{thread_id}/runs/{run_id}/steps: + get: + operationId: listRunSteps + tags: + - Assistants + summary: Returns a list of run steps belonging to a run. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run and run steps belong to. + - name: run_id + in: path + required: true + schema: + type: string + description: The ID of the run the run steps belong to. + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + - name: include[] + in: query + description: > + A list of additional fields to include in the response. Currently + the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch + the file search result content. + + + See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. 
+ schema: + type: array + items: + type: string + enum: + - step_details.tool_calls[*].file_search.results[*].content + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunStepsResponse" + x-oaiMeta: + name: List run steps + group: threads + beta: true + returns: A list of [run step](/docs/api-reference/run-steps/step-object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run_steps = client.beta.threads.runs.steps.list( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run_steps) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.list( + "thread_abc123", + "run_abc123" + ); + console.log(runStep); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + ], + "first_id": "step_abc123", + "last_id": "step_abc456", + "has_more": false + } + /threads/{thread_id}/runs/{run_id}/steps/{step_id}: + get: + operationId: getRunStep + tags: + - Assistants + summary: Retrieves a run step. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which the run and run step belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to which the run step belongs. + - in: path + name: step_id + required: true + schema: + type: string + description: The ID of the run step to retrieve. + - name: include[] + in: query + description: > + A list of additional fields to include in the response. Currently + the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch + the file search result content. + + + See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + schema: + type: array + items: + type: string + enum: + - step_details.tool_calls[*].file_search.results[*].content + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunStepObject" + x-oaiMeta: + name: Retrieve run step + group: threads + beta: true + returns: The [run step](/docs/api-reference/run-steps/step-object) object + matching the specified ID. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run_step = client.beta.threads.runs.steps.retrieve( + thread_id="thread_abc123", + run_id="run_abc123", + step_id="step_abc123" + ) + + print(run_step) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.retrieve( + "thread_abc123", + "run_abc123", + "step_abc123" + ); + console.log(runStep); + } + + main(); + response: | + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: + post: + operationId: submitToolOuputsToRun + tags: + - Assistants + summary: > + When a run has the `status: "requires_action"` and + `required_action.type` is `submit_tool_outputs`, this endpoint can be + used to submit the outputs from the tool calls once they're all + completed. All outputs must be submitted in a single request. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to which this + run belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run that requires the tool output submission. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SubmitToolOutputsRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Submit tool outputs to run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the + specified ID. + examples: + - title: Default + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } + ] + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_123", + run_id="run_123", + tool_outputs=[ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." 
+ } + ] + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.submitToolOutputs( + "thread_123", + "run_123", + { + tool_outputs: [ + { + tool_call_id: "call_001", + output: "70 degrees and sunny.", + }, + ], + } + ); + + console.log(run); + } + + main(); + response: > + { + "id": "run_123", + "object": "thread.run", + "created_at": 1699075592, + "assistant_id": "asst_123", + "thread_id": "thread_123", + "status": "queued", + "started_at": 1699075592, + "expires_at": 1699076192, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + - title: Streaming + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } + ], + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_123", + run_id="run_123", + tool_outputs=[ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." 
+ } + ], + stream=True + ) + + for event in stream: + print(event) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const stream = await openai.beta.threads.runs.submitToolOutputs( + "thread_123", + "run_123", + { + tool_outputs: [ + { + tool_call_id: "call_001", + output: "70 degrees and sunny.", + }, + ], + } + ); + + for await (const event of stream) { + console.log(event); + } + } + + + main(); + response: > + event: thread.run.step.completed + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San + Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and + sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} + + + event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. 
San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.step.created + + data: + {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} + + + event: thread.run.step.in_progress + + data: + {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} + + + event: thread.message.created + + data: + {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.in_progress + + data: + {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + current"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + weather"}}]}} + + + ... 
+ + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + sunny"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} + + + event: thread.message.completed + + data: + {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The + current weather in San Francisco, CA is 70 degrees Fahrenheit and + sunny.","annotations":[]}}],"metadata":{}} + + + event: thread.run.step.completed + + data: + {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} + + + event: thread.run.completed + + data: + {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: done + + data: [DONE] + /uploads: + post: + operationId: createUpload + tags: + - Uploads + summary: > + Creates an intermediate [Upload](/docs/api-reference/uploads/object) + object that you can add [Parts](/docs/api-reference/uploads/part-object) + to. Currently, an Upload can accept at most 8 GB in total and expires + after an hour after you create it. + + + Once you complete the Upload, we will create a + [File](/docs/api-reference/files/object) object that contains all the + parts you uploaded. This File is usable in the rest of our platform as a + regular File object. + + + For certain `purpose`s, the correct `mime_type` must be specified. + Please refer to documentation for the supported MIME types for your use + case: + + - [Assistants](/docs/assistants/tools/file-search#supported-files) + + + For guidance on the proper filename extensions for each purpose, please + follow the documentation on [creating a + File](/docs/api-reference/files/create). 
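+      # A minimal, non-normative sketch of the Upload flow described above
+      # (create an Upload, add Parts, then complete it). It assumes the
+      # openai-python SDK's `client.uploads` and `client.uploads.parts`
+      # helpers, which are not defined in this spec:
+      #
+      #   import os
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   path = "training_examples.jsonl"
+      #   upload = client.uploads.create(
+      #       purpose="fine-tune",
+      #       filename=os.path.basename(path),
+      #       bytes=os.path.getsize(path),
+      #       mime_type="text/jsonl",
+      #   )
+      #   part_ids = []
+      #   with open(path, "rb") as f:
+      #       # Each Part can be at most 64 MB; Parts may also be added in parallel.
+      #       while chunk := f.read(64 * 1024 * 1024):
+      #           part = client.uploads.parts.create(upload_id=upload.id, data=chunk)
+      #           part_ids.append(part.id)
+      #   # Pass the Part IDs in the intended order; the byte total must match `bytes`.
+      #   completed = client.uploads.complete(upload_id=upload.id, part_ids=part_ids)
+      #   print(completed.file.id)  # the nested File object is usable across the platform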
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Create upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status + `pending`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "purpose": "fine-tune", + "filename": "training_examples.jsonl", + "bytes": 2147483648, + "mime_type": "text/jsonl" + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "pending", + "expires_at": 1719127296 + } + /uploads/{upload_id}/cancel: + post: + operationId: cancelUpload + tags: + - Uploads + summary: | + Cancels the Upload. No Parts may be added after an Upload is cancelled. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Cancel upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status + `cancelled`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/cancel + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "cancelled", + "expires_at": 1719127296 + } + /uploads/{upload_id}/complete: + post: + operationId: completeUpload + tags: + - Uploads + summary: > + Completes the [Upload](/docs/api-reference/uploads/object). + + + Within the returned Upload object, there is a nested + [File](/docs/api-reference/files/object) object that is ready to use in + the rest of the platform. + + + You can specify the order of the Parts by passing in an ordered list of + the Part IDs. + + + The number of bytes uploaded upon completion must match the number of + bytes initially specified when creating the Upload object. No Parts may + be added after an Upload is completed. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CompleteUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Complete upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status + `completed` with an additional `file` property containing the created + usable File object. 
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/uploads/upload_abc123/complete \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -d '{
+                  "part_ids": ["part_def456", "part_ghi789"]
+                }'
+          response: |
+            {
+              "id": "upload_abc123",
+              "object": "upload",
+              "bytes": 2147483648,
+              "created_at": 1719184911,
+              "filename": "training_examples.jsonl",
+              "purpose": "fine-tune",
+              "status": "completed",
+              "expires_at": 1719127296,
+              "file": {
+                "id": "file-xyz321",
+                "object": "file",
+                "bytes": 2147483648,
+                "created_at": 1719186911,
+                "filename": "training_examples.jsonl",
+                "purpose": "fine-tune"
+              }
+            }
+  /uploads/{upload_id}/parts:
+    post:
+      operationId: addUploadPart
+      tags:
+        - Uploads
+      summary: >
+        Adds a [Part](/docs/api-reference/uploads/part-object) to an
+        [Upload](/docs/api-reference/uploads/object) object. A Part represents a
+        chunk of bytes from the file you are trying to upload.
+
+
+        Each Part can be at most 64 MB, and you can add Parts until you hit the
+        Upload maximum of 8 GB.
+
+
+        It is possible to add multiple Parts in parallel. You can decide the
+        intended order of the Parts when you [complete the
+        Upload](/docs/api-reference/uploads/complete).
+      parameters:
+        - in: path
+          name: upload_id
+          required: true
+          schema:
+            type: string
+            example: upload_abc123
+          description: |
+            The ID of the Upload.
+      requestBody:
+        required: true
+        content:
+          multipart/form-data:
+            schema:
+              $ref: "#/components/schemas/AddUploadPartRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UploadPart"
+      x-oaiMeta:
+        name: Add upload part
+        group: uploads
+        returns: The upload [Part](/docs/api-reference/uploads/part-object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/uploads/upload_abc123/parts \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -F data="aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz..."
+          response: |
+            {
+              "id": "part_def456",
+              "object": "upload.part",
+              "created_at": 1719185911,
+              "upload_id": "upload_abc123"
+            }
+  /vector_stores:
+    get:
+      operationId: listVectorStores
+      tags:
+        - Vector stores
+      summary: Returns a list of vector stores.
+      parameters:
+        - name: limit
+          in: query
+          description: >
+            A limit on the number of objects to be returned. Limit can range
+            between 1 and 100, and the default is 20.
+          required: false
+          schema:
+            type: integer
+            default: 20
+        - name: order
+          in: query
+          description: >
+            Sort order by the `created_at` timestamp of the objects. `asc` for
+            ascending order and `desc` for descending order.
+          schema:
+            type: string
+            default: desc
+            enum:
+              - asc
+              - desc
+        - name: after
+          in: query
+          description: >
+            A cursor for use in pagination. `after` is an object ID that defines
+            your place in the list. For instance, if you make a list request and
+            receive 100 objects, ending with obj_foo, your subsequent call can
+            include after=obj_foo in order to fetch the next page of the list.
+          schema:
+            type: string
+        - name: before
+          in: query
+          description: >
+            A cursor for use in pagination. `before` is an object ID that
+            defines your place in the list. For instance, if you make a list
+            request and receive 100 objects, starting with obj_foo, your
+            subsequent call can include before=obj_foo in order to fetch the
+            previous page of the list.
+ schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListVectorStoresResponse" + x-oaiMeta: + name: List vector stores + group: vector_stores + beta: true + returns: A list of [vector store](/docs/api-reference/vector-stores/object) + objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_stores = client.beta.vector_stores.list() + print(vector_stores) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStores = await openai.beta.vectorStores.list(); + console.log(vectorStores); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + }, + { + "id": "vs_abc456", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ v2", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + } + ], + "first_id": "vs_abc123", + "last_id": "vs_abc456", + "has_more": false + } + post: + operationId: createVectorStore + tags: + - Vector stores + summary: Create a vector store. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateVectorStoreRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreObject" + x-oaiMeta: + name: Create vector store + group: vector_stores + beta: true + returns: A [vector store](/docs/api-reference/vector-stores/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + -d '{ + "name": "Support FAQ" + }' + python: | + from openai import OpenAI + client = OpenAI() + + vector_store = client.beta.vector_stores.create( + name="Support FAQ" + ) + print(vector_store) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStore = await openai.beta.vectorStores.create({ + name: "Support FAQ" + }); + console.log(vectorStore); + } + + main(); + response: | + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + } + /vector_stores/{vector_store_id}: + get: + operationId: getVectorStore + tags: + - Vector stores + summary: Retrieves a vector store. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + description: The ID of the vector store to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreObject" + x-oaiMeta: + name: Retrieve vector store + group: vector_stores + beta: true + returns: The [vector store](/docs/api-reference/vector-stores/object) object + matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_store = client.beta.vector_stores.retrieve( + vector_store_id="vs_abc123" + ) + print(vector_store) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStore = await openai.beta.vectorStores.retrieve( + "vs_abc123" + ); + console.log(vectorStore); + } + + main(); + response: | + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776 + } + post: + operationId: modifyVectorStore + tags: + - Vector stores + summary: Modifies a vector store. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + description: The ID of the vector store to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateVectorStoreRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreObject" + x-oaiMeta: + name: Modify vector store + group: vector_stores + beta: true + returns: The modified [vector store](/docs/api-reference/vector-stores/object) + object. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + -d '{ + "name": "Support FAQ" + }' + python: | + from openai import OpenAI + client = OpenAI() + + vector_store = client.beta.vector_stores.update( + vector_store_id="vs_abc123", + name="Support FAQ" + ) + print(vector_store) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStore = await openai.beta.vectorStores.update( + "vs_abc123", + { + name: "Support FAQ" + } + ); + console.log(vectorStore); + } + + main(); + response: | + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + } + delete: + operationId: deleteVectorStore + tags: + - Vector stores + summary: Delete a vector store. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + description: The ID of the vector store to delete. 
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/DeleteVectorStoreResponse"
+      x-oaiMeta:
+        name: Delete vector store
+        group: vector_stores
+        beta: true
+        returns: Deletion status
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -X DELETE
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              deleted_vector_store = client.beta.vector_stores.delete(
+                vector_store_id="vs_abc123"
+              )
+              print(deleted_vector_store)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const deletedVectorStore = await openai.beta.vectorStores.del(
+                  "vs_abc123"
+                );
+                console.log(deletedVectorStore);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vs_abc123",
+              "object": "vector_store.deleted",
+              "deleted": true
+            }
+  /vector_stores/{vector_store_id}/file_batches:
+    post:
+      operationId: createVectorStoreFileBatch
+      tags:
+        - Vector stores
+      summary: Create a vector store file batch.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+            example: vs_abc123
+          description: |
+            The ID of the vector store for which to create a File Batch.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/CreateVectorStoreFileBatchRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Create vector store file batch
+        group: vector_stores
+        beta: true
+        returns: A [vector store file
+          batch](/docs/api-reference/vector-stores-file-batches/batch-object)
+          object.
+        examples:
+          request:
+            curl: >
+              curl
+              https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -d '{
+                  "file_ids": ["file-abc123", "file-abc456"]
+                }'
+            python: >
+              from openai import OpenAI
+
+              client = OpenAI()
+
+
+              vector_store_file_batch =
+              client.beta.vector_stores.file_batches.create(
+                  vector_store_id="vs_abc123",
+                  file_ids=["file-abc123", "file-abc456"]
+              )
+
+              print(vector_store_file_batch)
+            node.js: >
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+
+              async function main() {
+                const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(
+                  "vs_abc123",
+                  {
+                    file_ids: ["file-abc123", "file-abc456"]
+                  }
+                );
+                console.log(myVectorStoreFileBatch);
+              }
+
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "in_progress",
+              "file_counts": {
+                "in_progress": 1,
+                "completed": 1,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 2
+              }
+            }
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}:
+    get:
+      operationId: getVectorStoreFileBatch
+      tags:
+        - Vector stores
+      summary: Retrieves a vector store file batch.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+            example: vs_abc123
+          description: The ID of the vector store that the file batch belongs to.
+        - in: path
+          name: batch_id
+          required: true
+          schema:
+            type: string
+            example: vsfb_abc123
+          description: The ID of the file batch being retrieved.
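+      # A minimal, non-normative polling sketch for this endpoint, reusing the
+      # `client.beta.vector_stores.file_batches.retrieve` call from the example
+      # below: re-fetch the batch until its `status` leaves `in_progress`.
+      #
+      #   import time
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   batch = client.beta.vector_stores.file_batches.retrieve(
+      #       vector_store_id="vs_abc123", batch_id="vsfb_abc123"
+      #   )
+      #   while batch.status == "in_progress":
+      #       time.sleep(1)
+      #       batch = client.beta.vector_stores.file_batches.retrieve(
+      #           vector_store_id="vs_abc123", batch_id="vsfb_abc123"
+      #       )
+      #   print(batch.status, batch.file_counts)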
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Retrieve vector store file batch
+        group: vector_stores
+        beta: true
+        returns: The [vector store file
+          batch](/docs/api-reference/vector-stores-file-batches/batch-object)
+          object.
+        examples:
+          request:
+            curl: >
+              curl
+              https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123
+              \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2"
+            python: >
+              from openai import OpenAI
+
+              client = OpenAI()
+
+
+              vector_store_file_batch =
+              client.beta.vector_stores.file_batches.retrieve(
+                  vector_store_id="vs_abc123",
+                  batch_id="vsfb_abc123"
+              )
+
+              print(vector_store_file_batch)
+            node.js: >
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+
+              async function main() {
+                const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(
+                  "vs_abc123",
+                  "vsfb_abc123"
+                );
+                console.log(vectorStoreFileBatch);
+              }
+
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "in_progress",
+              "file_counts": {
+                "in_progress": 1,
+                "completed": 1,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 2
+              }
+            }
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel:
+    post:
+      operationId: cancelVectorStoreFileBatch
+      tags:
+        - Vector stores
+      summary: Cancel a vector store file batch. This attempts to cancel the
+        processing of files in this batch as soon as possible.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the vector store that the file batch belongs to.
+        - in: path
+          name: batch_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the file batch to cancel.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Cancel vector store file batch
+        group: vector_stores
+        beta: true
+        returns: The modified vector store file batch object.
+        examples:
+          request:
+            curl: >
+              curl
+              https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/cancel
+              \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -X POST
+            python: >
+              from openai import OpenAI
+
+              client = OpenAI()
+
+
+              deleted_vector_store_file_batch =
+              client.beta.vector_stores.file_batches.cancel(
+                  vector_store_id="vs_abc123",
+                  batch_id="vsfb_abc123"
+              )
+
+              print(deleted_vector_store_file_batch)
+            node.js: >
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+
+              async function main() {
+                const deletedVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.cancel(
+                  "vs_abc123",
+                  "vsfb_abc123"
+                );
+                console.log(deletedVectorStoreFileBatch);
+              }
+
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "cancelling",
+              "file_counts": {
+                "in_progress": 12,
+                "completed": 3,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 15
+              }
+            }
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}/files:
+    get:
+      operationId: listFilesInVectorStoreBatch
+      tags:
+        - Vector stores
+      summary: Returns a list of vector store files in a batch.
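+      # A minimal, non-normative cursor-pagination sketch using the `after`
+      # parameter documented below together with the `list_files` call from the
+      # example further down. The `data`, `has_more`, and item `id` fields mirror
+      # the JSON response shown in that example; treating them as attributes of
+      # the SDK's returned page object is an assumption:
+      #
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   params = {"vector_store_id": "vs_abc123", "batch_id": "vsfb_abc123", "limit": 100}
+      #   while True:
+      #       page = client.beta.vector_stores.file_batches.list_files(**params)
+      #       for vs_file in page.data:
+      #           print(vs_file.id)
+      #       if not page.has_more:
+      #           break
+      #       # "ending with obj_foo, your subsequent call can include after=obj_foo"
+      #       params["after"] = page.data[-1].id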
+      parameters:
+        - name: vector_store_id
+          in: path
+          description: The ID of the vector store that the files belong to.
+          required: true
+          schema:
+            type: string
+        - name: batch_id
+          in: path
+          description: The ID of the file batch that the files belong to.
+          required: true
+          schema:
+            type: string
+        - name: limit
+          in: query
+          description: >
+            A limit on the number of objects to be returned. Limit can range
+            between 1 and 100, and the default is 20.
+          required: false
+          schema:
+            type: integer
+            default: 20
+        - name: order
+          in: query
+          description: >
+            Sort order by the `created_at` timestamp of the objects. `asc` for
+            ascending order and `desc` for descending order.
+          schema:
+            type: string
+            default: desc
+            enum:
+              - asc
+              - desc
+        - name: after
+          in: query
+          description: >
+            A cursor for use in pagination. `after` is an object ID that defines
+            your place in the list. For instance, if you make a list request and
+            receive 100 objects, ending with obj_foo, your subsequent call can
+            include after=obj_foo in order to fetch the next page of the list.
+          schema:
+            type: string
+        - name: before
+          in: query
+          description: >
+            A cursor for use in pagination. `before` is an object ID that
+            defines your place in the list. For instance, if you make a list
+            request and receive 100 objects, starting with obj_foo, your
+            subsequent call can include before=obj_foo in order to fetch the
+            previous page of the list.
+          schema:
+            type: string
+        - name: filter
+          in: query
+          description: Filter by file status. One of `in_progress`, `completed`, `failed`,
+            `cancelled`.
+          schema:
+            type: string
+            enum:
+              - in_progress
+              - completed
+              - failed
+              - cancelled
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ListVectorStoreFilesResponse"
+      x-oaiMeta:
+        name: List vector store files in a batch
+        group: vector_stores
+        beta: true
+        returns: A list of [vector store
+          file](/docs/api-reference/vector-stores-files/file-object) objects.
+        examples:
+          request:
+            curl: >
+              curl
+              https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/files
+              \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2"
+            python: >
+              from openai import OpenAI
+
+              client = OpenAI()
+
+
+              vector_store_files =
+              client.beta.vector_stores.file_batches.list_files(
+                  vector_store_id="vs_abc123",
+                  batch_id="vsfb_abc123"
+              )
+
+              print(vector_store_files)
+            node.js: >
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+
+              async function main() {
+                const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles(
+                  "vs_abc123",
+                  "vsfb_abc123"
+                );
+                console.log(vectorStoreFiles);
+              }
+
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "id": "file-abc123",
+                  "object": "vector_store.file",
+                  "created_at": 1699061776,
+                  "vector_store_id": "vs_abc123"
+                },
+                {
+                  "id": "file-abc456",
+                  "object": "vector_store.file",
+                  "created_at": 1699061776,
+                  "vector_store_id": "vs_abc123"
+                }
+              ],
+              "first_id": "file-abc123",
+              "last_id": "file-abc456",
+              "has_more": false
+            }
+  /vector_stores/{vector_store_id}/files:
+    get:
+      operationId: listVectorStoreFiles
+      tags:
+        - Vector stores
+      summary: Returns a list of vector store files.
+      parameters:
+        - name: vector_store_id
+          in: path
+          description: The ID of the vector store that the files belong to.
+ required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + - name: filter + in: query + description: Filter by file status. One of `in_progress`, `completed`, `failed`, + `cancelled`. + schema: + type: string + enum: + - in_progress + - completed + - failed + - cancelled + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListVectorStoreFilesResponse" + x-oaiMeta: + name: List vector store files + group: vector_stores + beta: true + returns: A list of [vector store + file](/docs/api-reference/vector-stores-files/file-object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_store_files = client.beta.vector_stores.files.list( + vector_store_id="vs_abc123" + ) + print(vector_store_files) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + async function main() { + const vectorStoreFiles = await openai.beta.vectorStores.files.list( + "vs_abc123" + ); + console.log(vectorStoreFiles); + } + + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abc123" + }, + { + "id": "file-abc456", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abc123" + } + ], + "first_id": "file-abc123", + "last_id": "file-abc456", + "has_more": false + } + post: + operationId: createVectorStoreFile + tags: + - Vector stores + summary: Create a vector store file by attaching a + [File](/docs/api-reference/files) to a [vector + store](/docs/api-reference/vector-stores/object). + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + example: vs_abc123 + description: | + The ID of the vector store for which to create a File. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateVectorStoreFileRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreFileObject" + x-oaiMeta: + name: Create vector store file + group: vector_stores + beta: true + returns: A [vector store + file](/docs/api-reference/vector-stores-files/file-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "file_id": "file-abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + vector_store_file = client.beta.vector_stores.files.create( + vector_store_id="vs_abc123", + file_id="file-abc123" + ) + print(vector_store_file) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + async function main() { + const myVectorStoreFile = await openai.beta.vectorStores.files.create( + "vs_abc123", + { + file_id: "file-abc123" + } + ); + console.log(myVectorStoreFile); + } + + + main(); + response: | + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "usage_bytes": 1234, + "vector_store_id": "vs_abcd", + "status": "completed", + "last_error": null + } + /vector_stores/{vector_store_id}/files/{file_id}: + get: + operationId: getVectorStoreFile + tags: + - Vector stores + summary: Retrieves a vector store file. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + example: vs_abc123 + description: The ID of the vector store that the file belongs to. + - in: path + name: file_id + required: true + schema: + type: string + example: file-abc123 + description: The ID of the file being retrieved. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreFileObject" + x-oaiMeta: + name: Retrieve vector store file + group: vector_stores + beta: true + returns: The [vector store + file](/docs/api-reference/vector-stores-files/file-object) object. + examples: + request: + curl: > + curl + https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_store_file = client.beta.vector_stores.files.retrieve( + vector_store_id="vs_abc123", + file_id="file-abc123" + ) + print(vector_store_file) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + async function main() { + const vectorStoreFile = await openai.beta.vectorStores.files.retrieve( + "vs_abc123", + "file-abc123" + ); + console.log(vectorStoreFile); + } + + + main(); + response: | + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abcd", + "status": "completed", + "last_error": null + } + delete: + operationId: deleteVectorStoreFile + tags: + - Vector stores + summary: Delete a vector store file. This will remove the file from the vector + store but the file itself will not be deleted. To delete the file, use + the [delete file](/docs/api-reference/files/delete) endpoint. 
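+      # A minimal, non-normative sketch of the distinction drawn above: detaching
+      # the file from the vector store leaves the underlying File intact, which can
+      # then be removed separately. The vector-store call mirrors the example below;
+      # `client.files.delete` is assumed from the Files API, not shown here.
+      #
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   # 1) Detach the file from the vector store (the File object remains).
+      #   client.beta.vector_stores.files.delete(
+      #       vector_store_id="vs_abc123", file_id="file-abc123"
+      #   )
+      #   # 2) Optionally delete the File itself with the delete file endpoint.
+      #   client.files.delete("file-abc123")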
+ parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + description: The ID of the vector store that the file belongs to. + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteVectorStoreFileResponse" + x-oaiMeta: + name: Delete vector store file + group: vector_stores + beta: true + returns: Deletion status + examples: + request: + curl: > + curl + https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -X DELETE + python: > + from openai import OpenAI + + client = OpenAI() + + + deleted_vector_store_file = + client.beta.vector_stores.files.delete( + vector_store_id="vs_abc123", + file_id="file-abc123" + ) + + print(deleted_vector_store_file) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + async function main() { + const deletedVectorStoreFile = await openai.beta.vectorStores.files.del( + "vs_abc123", + "file-abc123" + ); + console.log(deletedVectorStoreFile); + } + + + main(); + response: | + { + id: "file-abc123", + object: "vector_store.file.deleted", + deleted: true + } +components: + schemas: + AddUploadPartRequest: + type: object + additionalProperties: false + properties: + data: + description: | + The chunk of bytes for this Part. + type: string + format: binary + required: + - data + AssistantObject: + type: object + title: Assistant + description: Represents an `assistant` that can call the model and use tools. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `assistant`. + type: string + enum: + - assistant + created_at: + description: The Unix timestamp (in seconds) for when the assistant was created. + type: integer + name: + description: | + The name of the assistant. The maximum length is 256 characters. + type: string + maxLength: 256 + nullable: true + description: + description: > + The description of the assistant. The maximum length is 512 + characters. + type: string + maxLength: 512 + nullable: true + model: + description: > + ID of the model to use. You can use the [List + models](/docs/api-reference/models/list) API to see all of your + available models, or see our [Model overview](/docs/models) for + descriptions of them. + type: string + instructions: + description: > + The system instructions that the assistant uses. The maximum length + is 256,000 characters. + type: string + maxLength: 256000 + nullable: true + tools: + description: > + A list of tool enabled on the assistant. There can be a maximum of + 128 tools per assistant. Tools can be of types `code_interpreter`, + `file_search`, or `function`. + default: [] + type: array + maxItems: 128 + items: oneOf: - - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' - - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' 
+ - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" x-oaiExpandable: true - CreateVectorStoreFileRequest: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.' - chunking_strategy: - $ref: '#/components/schemas/ChunkingStrategyRequestParam' - additionalProperties: false - ListVectorStoreFilesResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreFileObject' - first_id: - type: string - example: file-abc123 - last_id: - type: string - example: file-abc456 - has_more: - type: boolean - example: false - DeleteVectorStoreFileResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - vector_store.file.deleted - type: string - VectorStoreFileBatchObject: - title: Vector store file batch - required: - - id - - object - - created_at - - vector_store_id - - status - - file_counts - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store.files_batch - type: string - description: 'The object type, which is always `vector_store.file_batch`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store files batch was created. - vector_store_id: - type: string - description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' - status: - enum: - - in_progress - - completed - - cancelled - - failed - type: string - description: 'The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`.' - file_counts: - required: - - in_progress - - completed - - cancelled - - failed - - total - type: object - properties: - in_progress: - type: integer - description: The number of files that are currently being processed. - completed: - type: integer - description: The number of files that have been processed. - failed: - type: integer - description: The number of files that have failed to process. - cancelled: - type: integer - description: The number of files that where cancelled. - total: - type: integer - description: The total number of files. - description: A batch of files attached to a vector store. - x-oaiMeta: - name: The vector store files batch object - beta: true - example: "{\n \"id\": \"vsfb_123\",\n \"object\": \"vector_store.files_batch\",\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 100\n }\n}\n" - CreateVectorStoreFileBatchRequest: - required: - - file_ids - type: object - properties: + tool_resources: + type: object + description: > + A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the + `file_search` tool requires a list of vector store IDs. 
+ properties: + code_interpreter: + type: object + properties: file_ids: - maxItems: 500 - minItems: 1 - type: array - items: - type: string - description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' - chunking_strategy: - $ref: '#/components/schemas/ChunkingStrategyRequestParam' - additionalProperties: false - AssistantStreamEvent: - oneOf: - - $ref: '#/components/schemas/ThreadStreamEvent' - - $ref: '#/components/schemas/RunStreamEvent' - - $ref: '#/components/schemas/RunStepStreamEvent' - - $ref: '#/components/schemas/MessageStreamEvent' - - $ref: '#/components/schemas/ErrorEvent' - - $ref: '#/components/schemas/DoneEvent' - description: "Represents an event emitted when streaming a Run.\n\nEach event in a server-sent events stream has an `event` and `data` property:\n\n```\nevent: thread.created\ndata: {\"id\": \"thread_123\", \"object\": \"thread\", ...}\n```\n\nWe emit events whenever a new object is created, transitions to a new state, or is being\nstreamed in parts (deltas). For example, we emit `thread.run.created` when a new run\nis created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses\nto create a message during a run, we emit a `thread.message.created event`, a\n`thread.message.in_progress` event, many `thread.message.delta` events, and finally a\n`thread.message.completed` event.\n\nWe may add additional events over time, so we recommend handling unknown events gracefully\nin your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to\nintegrate the Assistants API with streaming.\n" - x-oaiMeta: - name: Assistant stream events - beta: true - ThreadStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.created - type: string - data: - $ref: '#/components/schemas/ThreadObject' - description: 'Occurs when a new [thread](/docs/api-reference/threads/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [thread](/docs/api-reference/threads/object)' - RunStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.created - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a new [run](/docs/api-reference/runs/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.queued - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.in_progress - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.requires_action - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status.' 
- x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.completed - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.incomplete - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.failed - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) fails.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.cancelling - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.cancelled - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) is cancelled.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.expired - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) expires.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - RunStepStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.created - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.in_progress - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.delta - type: string - data: - $ref: '#/components/schemas/RunStepDeltaObject' - description: 'Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed.' 
- x-oaiMeta: - dataDescription: '`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.completed - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.failed - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) fails.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.cancelled - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.expired - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) expires.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - MessageStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.created - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.in_progress - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.delta - type: string - data: - $ref: '#/components/schemas/MessageDeltaObject' - description: 'Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed.' - x-oaiMeta: - dataDescription: '`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.completed - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter`` tool. There can be a + maximum of 20 files associated with the tool. 
+ default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The ID of the [vector + store](/docs/api-reference/vector-stores/object) attached to + this assistant. There can be a maximum of 1 vector store + attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - metadata + x-oaiMeta: + name: The assistant object + beta: true + example: > + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698984975, + "name": "Math Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + AssistantStreamEvent: + description: > + Represents an event emitted when streaming a Run. + + + Each event in a server-sent events stream has an `event` and `data` + property: + + + ``` + + event: thread.created + + data: {"id": "thread_123", "object": "thread", ...} + + ``` + + + We emit events whenever a new object is created, transitions to a new + state, or is being + + streamed in parts (deltas). For example, we emit `thread.run.created` + when a new run + + is created, `thread.run.completed` when a run completes, and so on. When + an Assistant chooses + + to create a message during a run, we emit a `thread.message.created` + event, a + + `thread.message.in_progress` event, many `thread.message.delta` events, + and finally a + + `thread.message.completed` event. + + + We may add additional events over time, so we recommend handling unknown + events gracefully + + in your code. See the [Assistants API + quickstart](/docs/assistants/overview) to learn how to + + integrate the Assistants API with streaming.
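# Editor's note: a minimal sketch of consuming the `AssistantStreamEvent` union described
# above, assuming the official `openai` Python SDK; the assistant ID and user message are
# placeholders. Unknown event types are skipped, as the description recommends.
#
#   from openai import OpenAI
#   client = OpenAI()
#   thread = client.beta.threads.create(
#       messages=[{"role": "user", "content": "Solve 3x + 11 = 14."}]
#   )
#   with client.beta.threads.runs.stream(
#       thread_id=thread.id, assistant_id="asst_abc123"
#   ) as stream:
#       for event in stream:
#           if event.event == "thread.message.delta":
#               pass  # append the streamed text delta to your UI
#           elif event.event == "thread.run.completed":
#               print("run finished")
#           # any other (or future) event type is ignored gracefully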
+ oneOf: + - $ref: "#/components/schemas/ThreadStreamEvent" + - $ref: "#/components/schemas/RunStreamEvent" + - $ref: "#/components/schemas/RunStepStreamEvent" + - $ref: "#/components/schemas/MessageStreamEvent" + - $ref: "#/components/schemas/ErrorEvent" + - $ref: "#/components/schemas/DoneEvent" + x-oaiMeta: + name: Assistant stream events + beta: true + AssistantToolsCode: + type: object + title: Code interpreter tool + properties: + type: + type: string + description: "The type of tool being defined: `code_interpreter`" + enum: + - code_interpreter + required: + - type + AssistantToolsFileSearch: + type: object + title: FileSearch tool + properties: + type: + type: string + description: "The type of tool being defined: `file_search`" + enum: + - file_search + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: > + The maximum number of results the file search tool should + output. The default is 20 for `gpt-4*` models and 5 for + `gpt-3.5-turbo`. This number should be between 1 and 50 + inclusive. + + + Note that the file search tool may output fewer than + `max_num_results` results. See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" + required: + - type + AssistantToolsFileSearchTypeOnly: + type: object + title: FileSearch tool + properties: + type: + type: string + description: "The type of tool being defined: `file_search`" + enum: + - file_search + required: + - type + AssistantToolsFunction: + type: object + title: Function tool + properties: + type: + type: string + description: "The type of tool being defined: `function`" + enum: + - function + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + AssistantsApiResponseFormatOption: + description: > + Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), [GPT-4 + Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables + Structured Outputs which ensures the model will match your supplied JSON + schema. Learn more in the [Structured Outputs + guide](/docs/guides/structured-outputs). + + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures + the message the model generates is valid JSON. + + + **Important:** when using JSON mode, you **must** also instruct the + model to produce JSON yourself via a system or user message. Without + this, the model may generate an unending stream of whitespace until the + generation reaches the token limit, resulting in a long-running and + seemingly "stuck" request. Also note that the message content may be + partially cut off if `finish_reason="length"`, which indicates the + generation exceeded `max_tokens` or the conversation exceeded the max + context length. + oneOf: + - type: string + description: | + `auto` is the default value + enum: + - auto + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true + AssistantsApiToolChoiceOption: + description: > + Controls which (if any) tool is called by the model. 
+ + `none` means the model will not call any tools and instead generates a + message. + + `auto` is the default value and means the model can pick between + generating a message or calling one or more tools. + + `required` means the model must call one or more tools before responding + to the user. + + Specifying a particular tool like `{"type": "file_search"}` or `{"type": + "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + oneOf: + - type: string + description: > + `none` means the model will not call any tools and instead generates + a message. `auto` means the model can pick between generating a + message or calling one or more tools. `required` means the model + must call one or more tools before responding to the user. + enum: + - none + - auto + - required + - $ref: "#/components/schemas/AssistantsNamedToolChoice" + x-oaiExpandable: true + AssistantsNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to + call a specific tool. + properties: + type: + type: string + enum: + - function + - code_interpreter + - file_search + description: The type of the tool. If type is `function`, the function name must + be set + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + AudioResponseFormat: + description: > + The format of the output, in one of these options: `json`, `text`, + `srt`, `verbose_json`, or `vtt`. + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + AuditLog: + type: object + description: A log of a user action or configuration change within this organization. + properties: + id: + type: string + description: The ID of this log. + type: + $ref: "#/components/schemas/AuditLogEventType" + effective_at: + type: integer + description: The Unix timestamp (in seconds) of the event. + project: + type: object + description: The project that the action was scoped to. Absent for actions not + scoped to projects. + properties: + id: + type: string + description: The project ID. + name: + type: string + description: The project title. + actor: + $ref: "#/components/schemas/AuditLogActor" + api_key.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + data: + type: object + description: The payload used to create the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. + `["api.model.request"]` + api_key.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + changes_requested: + type: object + description: The payload used to update the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. + `["api.model.request"]` + api_key.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + invite.sent: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + data: + type: object + description: The payload used to create the invite. 
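# Editor's note: a short sketch of `AssistantsApiToolChoiceOption` and
# `AssistantsNamedToolChoice` in practice, assuming the official `openai` Python SDK and
# that run creation accepts a `tool_choice` parameter as described above; IDs are
# placeholders.
#
#   from openai import OpenAI
#   client = OpenAI()
#   run = client.beta.threads.runs.create(
#       thread_id="thread_abc123",
#       assistant_id="asst_abc123",
#       # string form: "none", "auto", or "required"; the object form forces one tool
#       tool_choice={"type": "file_search"},
#   )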
+ properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or + `member`. + invite.accepted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + invite.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + login.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + logout.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + organization.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The organization ID. + changes_requested: + type: object + description: The payload used to update the organization settings. + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. + name: + type: string + description: The organization name. + settings: type: object properties: - event: - enum: - - thread.message.incomplete - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - ErrorEvent: - required: - - event - - data - type: object - properties: - event: - enum: - - error - type: string - data: - $ref: '#/components/schemas/Error' - description: 'Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout.' - x-oaiMeta: - dataDescription: '`data` is an [error](/docs/guides/error-codes/api-errors)' - DoneEvent: - required: - - event - - data - type: object - properties: - event: - enum: - - done - type: string - data: - enum: - - '[DONE]' - type: string - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: '`data` is `[DONE]`' - Batch: - required: - - id - - object - - endpoint - - input_file_id - - completion_window - - status - - created_at - type: object - properties: - id: - type: string - object: - enum: - - batch - type: string - description: 'The object type, which is always `batch`.' - endpoint: - type: string - description: The OpenAI API endpoint used by the batch. - errors: - type: object - properties: - object: - type: string - description: 'The object type, which is always `list`.' - data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: 'The name of the parameter that caused the error, if applicable.' - nullable: true - line: - type: integer - description: 'The line number of the input file where the error occurred, if applicable.' 
- nullable: true - input_file_id: - type: string - description: The ID of the input file for the batch. - completion_window: - type: string - description: The time frame within which the batch should be processed. - status: - enum: - - validating - - failed - - in_progress - - finalizing - - completed - - expired - - cancelling - - cancelled - type: string - description: The current status of the batch. - output_file_id: - type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: - type: string - description: The ID of the file containing the outputs of requests with errors. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. - cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. - request_counts: - required: - - total - - completed - - failed - type: object - properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - description: The request counts for different statuses within the batch. - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - x-oaiMeta: - name: The batch object - example: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - BatchRequestInput: - type: object - properties: - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: - enum: - - POST - type: string - description: The HTTP method to be used for the request. 
Currently only `POST` is supported. - url: - type: string - description: 'The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.' - description: The per-line object of the batch input file - x-oaiMeta: - name: The request input object - example: "{\"custom_id\": \"request-1\", \"method\": \"POST\", \"url\": \"/v1/chat/completions\", \"body\": {\"model\": \"gpt-4o-mini\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"What is 2+2?\"}]}}\n" - BatchRequestOutput: - type: object - properties: - id: - type: string - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: - type: object - properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: - type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. - body: - type: object - description: The JSON body of the response - x-oaiTypeLabel: map - nullable: true - error: - type: object - properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - description: 'For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.' + threads_ui_visibility: + type: string + description: Visibility of the threads page which shows messages created with + the Assistants API and Playground. One of `ANY_ROLE`, + `OWNERS`, or `NONE`. + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs + for your organization. One of `ANY_ROLE` or `OWNERS`. + project.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + data: + type: object + description: The payload used to create the project. + properties: + name: + type: string + description: The project name. + title: + type: string + description: The title of the project as seen on the dashboard. + project.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + description: The payload used to update the project. + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + project.archived: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + rate_limit.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The rate limit ID + changes_requested: + type: object + description: The payload used to update the rate limits. + properties: + max_requests_per_1_minute: + type: integer + description: The maximum requests per minute. + max_tokens_per_1_minute: + type: integer + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only relevant for certain models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only relevant for certain + models. 
+ max_requests_per_1_day: + type: integer + description: The maximum requests per day. Only relevant for certain models. + batch_1_day_max_input_tokens: + type: integer + description: The maximum batch input tokens per day. Only relevant for certain + models. + rate_limit.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The rate limit ID + service_account.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + data: + type: object + description: The payload used to create the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + changes_requested: + type: object + description: The payload used to updated the service account. + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + service_account.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The service account ID. + user.added: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. + data: + type: object + description: The payload used to add the user to the project. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + description: The payload used to update the user. + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + user.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The user ID. + required: + - id + - type + - effective_at + - actor + x-oaiMeta: + name: The audit log object + example: > + { + "id": "req_xxx_20240101", + "type": "api_key.created", + "effective_at": 1720804090, + "actor": { + "type": "session", + "session": { + "user": { + "id": "user-xxx", + "email": "user@example.com" + }, + "ip_address": "127.0.0.1", + "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + } + }, + "api_key.created": { + "id": "key_xxxx", + "data": { + "scopes": ["resource.operation"] + } + } + } + AuditLogActor: + type: object + description: The actor who performed the audit logged action. + properties: + type: + type: string + description: The type of actor. Is either `session` or `api_key`. + enum: + - session + - api_key + session: + type: object + $ref: "#/components/schemas/AuditLogActorSession" + api_key: + type: object + $ref: "#/components/schemas/AuditLogActorApiKey" + AuditLogActorApiKey: + type: object + description: The API Key used to perform the audit logged action. + properties: + id: + type: string + description: The tracking id of the API key. + type: + type: string + description: The type of API key. Can be either `user` or `service_account`. 
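# Editor's note: a sketch of listing `AuditLog` objects, assuming the
# `GET /v1/organization/audit_logs` endpoint defined elsewhere in this spec, an
# admin-scoped key in the OPENAI_ADMIN_KEY environment variable, and the `requests`
# package purely for illustration.
#
#   import os, requests
#   resp = requests.get(
#       "https://api.openai.com/v1/organization/audit_logs",
#       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
#       params={"limit": 20},
#   )
#   for log in resp.json()["data"]:
#       print(log["type"], log["effective_at"], log["actor"]["type"])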
+ enum: + - user + - service_account + user: + $ref: "#/components/schemas/AuditLogActorUser" + service_account: + $ref: "#/components/schemas/AuditLogActorServiceAccount" + AuditLogActorServiceAccount: + type: object + description: The service account that performed the audit logged action. + properties: + id: + type: string + description: The service account id. + AuditLogActorSession: + type: object + description: The session in which the audit logged action was performed. + properties: + user: + $ref: "#/components/schemas/AuditLogActorUser" + ip_address: + type: string + description: The IP address from which the action was performed. + AuditLogActorUser: + type: object + description: The user who performed the audit logged action. + properties: + id: + type: string + description: The user id. + email: + type: string + description: The user email. + AuditLogEventType: + type: string + description: The event type. + x-oaiExpandable: true + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - rate_limit.updated + - rate_limit.deleted + - user.added + - user.updated + - user.deleted + AutoChunkingStrategyRequestParam: + type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a + `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: + - auto + required: + - type + Batch: + type: object + properties: + id: + type: string + object: + type: string + enum: + - batch + description: The object type, which is always `batch`. + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + errors: + type: object + properties: + object: + type: string + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: The name of the parameter that caused the error, if applicable. 
nullable: true - description: The per-line object of the batch output and error files - x-oaiMeta: - name: The request output object - example: "{\"id\": \"batch_req_wnaDys\", \"custom_id\": \"request-2\", \"response\": {\"status_code\": 200, \"request_id\": \"req_c187b3\", \"body\": {\"id\": \"chatcmpl-9758Iw\", \"object\": \"chat.completion\", \"created\": 1711475054, \"model\": \"gpt-4o-mini\", \"choices\": [{\"index\": 0, \"message\": {\"role\": \"assistant\", \"content\": \"2 + 2 equals 4.\"}, \"finish_reason\": \"stop\"}], \"usage\": {\"prompt_tokens\": 24, \"completion_tokens\": 15, \"total_tokens\": 39}, \"system_fingerprint\": null}}, \"error\": null}\n" - ListBatchesResponse: - required: - - object - - data - - has_more - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Batch' - first_id: - type: string - example: batch_abc123 - last_id: - type: string - example: batch_abc456 - has_more: - type: boolean - object: - enum: - - list - type: string - AuditLogActorServiceAccount: - type: object - properties: - id: - type: string - description: The service account id. - description: The service account that performed the audit logged action. - AuditLogActorUser: - type: object - properties: - id: - type: string - description: The user id. - email: - type: string - description: The user email. - description: The user who performed the audit logged action. - AuditLogActorApiKey: - type: object - properties: - id: - type: string - description: The tracking id of the API key. - type: - enum: - - user - - service_account - type: string - description: The type of API key. Can be either `user` or `service_account`. - user: - $ref: '#/components/schemas/AuditLogActorUser' - service_account: - $ref: '#/components/schemas/AuditLogActorServiceAccount' - description: The API Key used to perform the audit logged action. - AuditLogActorSession: - type: object - properties: - user: - $ref: '#/components/schemas/AuditLogActorUser' - ip_address: - type: string - description: The IP address from which the action was performed. - description: The session in which the audit logged action was performed. - AuditLogActor: + line: + type: integer + description: The line number of the input file where the error occurred, if + applicable. + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + type: string + description: The current status of the batch. + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed + requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started + processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started + finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. 
+ failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started + cancelling. + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: + type: object + properties: + total: + type: integer + description: Total number of requests in the batch. + completed: + type: integer + description: Number of requests that have been completed successfully. + failed: + type: integer + description: Number of requests that have failed. + required: + - total + - completed + - failed + description: The request counts for different statuses within the batch. + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - endpoint + - input_file_id + - completion_window + - status + - created_at + x-oaiMeta: + name: The batch object + example: | + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "completed", + "output_file_id": "file-cvaTdG", + "error_file_id": "file-HOWS94", + "created_at": 1711471533, + "in_progress_at": 1711471538, + "expires_at": 1711557933, + "finalizing_at": 1711493133, + "completed_at": 1711493163, + "failed_at": null, + "expired_at": null, + "cancelling_at": null, + "cancelled_at": null, + "request_counts": { + "total": 100, + "completed": 95, + "failed": 5 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly eval job", + } + } + BatchRequestInput: + type: object + description: The per-line object of the batch input file + properties: + custom_id: + type: string + description: A developer-provided per-request id that will be used to match + outputs to inputs. Must be unique for each request in a batch. + method: + type: string + enum: + - POST + description: The HTTP method to be used for the request. Currently only `POST` + is supported. + url: + type: string + description: The OpenAI API relative URL to be used for the request. Currently + `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are + supported. + x-oaiMeta: + name: The request input object + example: > + {"custom_id": "request-1", "method": "POST", "url": + "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages": + [{"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is 2+2?"}]}} + BatchRequestOutput: + type: object + description: The per-line object of the batch output and error files + properties: + id: + type: string + custom_id: + type: string + description: A developer-provided per-request id that will be used to match + outputs to inputs. + response: + type: object + nullable: true + properties: + status_code: + type: integer + description: The HTTP status code of the response + request_id: + type: string + description: An unique identifier for the OpenAI API request. Please include + this request ID when contacting support. 
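# Editor's note: an end-to-end sketch for the `Batch` and `BatchRequestInput` objects
# above, assuming the official `openai` Python SDK and a local `requests.jsonl` file whose
# lines follow the request input format shown in the example.
#
#   from openai import OpenAI
#   client = OpenAI()
#   batch_file = client.files.create(file=open("requests.jsonl", "rb"), purpose="batch")
#   batch = client.batches.create(
#       input_file_id=batch_file.id,
#       endpoint="/v1/chat/completions",
#       completion_window="24h",
#       metadata={"batch_description": "Nightly eval job"},
#   )
#   batch = client.batches.retrieve(batch.id)  # poll until status == "completed"
#   if batch.status == "completed" and batch.output_file_id:
#       output_jsonl = client.files.content(batch.output_file_id).text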
+ body: + type: object + x-oaiTypeLabel: map + description: The JSON body of the response + error: + type: object + nullable: true + description: For requests that failed with a non-HTTP error, this will contain + more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + x-oaiMeta: + name: The request output object + example: > + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": + {"status_code": 200, "request_id": "req_c187b3", "body": {"id": + "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, + "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": + "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], + "usage": {"prompt_tokens": 24, "completion_tokens": 15, + "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + CancelUploadRequest: + type: object + additionalProperties: false + ChatCompletionFunctionCallOption: + type: object + description: > + Specifying a particular function via `{"name": "my_function"}` forces + the model to call that function. + properties: + name: + type: string + description: The name of the function to call. + required: + - name + ChatCompletionFunctions: + type: object + deprecated: true + properties: + description: + type: string + description: A description of what the function does, used by the model to + choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or + contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + ChatCompletionMessageToolCall: + type: object + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + description: The function that the model called. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + required: + - name + - arguments + required: + - id + - type + - function + ChatCompletionMessageToolCallChunk: + type: object + properties: + index: + type: integer + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + required: + - index + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. 
+ items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + ChatCompletionModalities: + type: array + nullable: true + description: > + Output types that you would like the model to generate for this request. + + Most models are capable of generating text, which is the default: + + + `["text"]` + + + The `gpt-4o-audio-preview` model can also be used to [generate + audio](/docs/guides/audio). To + + request that this model generate both text and audio responses, you can + + use: + + + `["text", "audio"]` + items: + type: string + enum: + - text + - audio + ChatCompletionNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to + call a specific function. + properties: + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + - function + ChatCompletionRequestAssistantMessage: + type: object + title: Assistant message + properties: + content: + x-oaiExpandable: true + nullable: true + oneOf: + - type: string + description: The contents of the assistant message. + title: Text content + - type: array + description: An array of content parts with a defined type. Can be one or more + of type `text`, or exactly one of type `refusal`. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + minItems: 1 + description: > + The contents of the assistant message. Required unless `tool_calls` + or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. + role: + type: string + enum: + - assistant + description: The role of the messages author, in this case `assistant`. + name: + type: string + description: An optional name for the participant. Provides the model + information to differentiate between participants of the same role. + audio: + type: object + nullable: true + x-oaiExpandable: true + description: | + Data about a previous audio response from the model. + [Learn more](/docs/guides/audio). + required: + - id + properties: + id: + type: string + description: | + Unique identifier for a previous audio response from the model. + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + function_call: + type: object + deprecated: true + description: Deprecated and replaced by `tool_calls`. The name and arguments of + a function that should be called, as generated by the model. + nullable: true + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + name: + type: string + description: The name of the function to call. 
+ required: + - arguments + - name + required: + - role + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + x-oaiExpandable: true + ChatCompletionRequestFunctionMessage: + type: object + title: Function message + deprecated: true + properties: + role: + type: string + enum: + - function + description: The role of the messages author, in this case `function`. + content: + nullable: true + type: string + description: The contents of the function message. + name: + type: string + description: The name of the function to call. + required: + - role + - content + - name + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + ChatCompletionRequestMessageContentPartAudio: + type: object + title: Audio content part + description: | + Learn about [audio inputs](/docs/guides/audio). + properties: + type: + type: string + enum: + - input_audio + description: The type of the content part. Always `input_audio`. + input_audio: + type: object + properties: + data: + type: string + description: Base64 encoded audio data. + format: + type: string + enum: + - wav + - mp3 + description: > + The format of the encoded audio data. Currently supports "wav" + and "mp3". + required: + - data + - format + required: + - type + - input_audio + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + description: | + Learn about [image inputs](/docs/guides/vision). + properties: + type: + type: string + enum: + - image_url + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision + guide](/docs/guides/vision#low-or-high-fidelity-image-understanding). + enum: + - auto + - low + - high + default: auto + required: + - url + required: + - type + - image_url + ChatCompletionRequestMessageContentPartRefusal: + type: object + title: Refusal content part + properties: + type: + type: string + enum: + - refusal + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. + required: + - type + - refusal + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + description: | + Learn about [text inputs](/docs/guides/text-generation). + properties: + type: + type: string + enum: + - text + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text + ChatCompletionRequestSystemMessage: + type: object + title: System message + properties: + content: + description: The contents of the system message. + oneOf: + - type: string + description: The contents of the system message. + title: Text content + - type: array + description: An array of content parts with a defined type. For system messages, + only type `text` is supported. 
+ title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + minItems: 1 + role: + type: string + enum: + - system + description: The role of the messages author, in this case `system`. + name: + type: string + description: An optional name for the participant. Provides the model + information to differentiate between participants of the same role. + required: + - content + - role + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + ChatCompletionRequestToolMessage: + type: object + title: Tool message + properties: + role: + type: string + enum: + - tool + description: The role of the messages author, in this case `tool`. + content: + oneOf: + - type: string + description: The contents of the tool message. + title: Text content + - type: array + description: An array of content parts with a defined type. For tool messages, + only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" + minItems: 1 + description: The contents of the tool message. + tool_call_id: + type: string + description: Tool call that this message is responding to. + required: + - role + - content + - tool_call_id + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + ChatCompletionRequestUserMessage: + type: object + title: User message + properties: + content: + description: | + The contents of the user message. + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type. Supported options + differ based on the [model](/docs/models) being used to generate + the response. Can contain text, image, or audio inputs. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" + minItems: 1 + x-oaiExpandable: true + role: + type: string + enum: + - user + description: The role of the messages author, in this case `user`. + name: + type: string + description: An optional name for the participant. Provides the model + information to differentiate between participants of the same role. + required: + - content + - role + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartAudio" + x-oaiExpandable: true + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: + type: string + description: The contents of the message. + nullable: true + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: + type: string + enum: + - assistant + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: Deprecated and replaced by `tool_calls`. The name and arguments of + a function that should be called, as generated by the model. 
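# Editor's note: a sketch of the user-message content parts defined above
# (`ChatCompletionRequestMessageContentPartText` and `...PartImage`), assuming the
# official `openai` Python SDK; the image URL is a placeholder.
#
#   from openai import OpenAI
#   client = OpenAI()
#   completion = client.chat.completions.create(
#       model="gpt-4o",
#       messages=[{
#           "role": "user",
#           "content": [
#               {"type": "text", "text": "What is in this image?"},
#               {"type": "image_url",
#                "image_url": {"url": "https://example.com/photo.png", "detail": "low"}},
#           ],
#       }],
#   )
#   print(completion.choices[0].message.content)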
+ properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments + audio: + type: object + nullable: true + description: > + If the audio output modality is requested, this object contains data + + about the audio response from the model. [Learn + more](/docs/guides/audio). + x-oaiExpandable: true + required: + - id + - expires_at + - data + - transcript + properties: + id: + type: string + description: Unique identifier for this audio response. + expires_at: + type: integer + description: > + The Unix timestamp (in seconds) for when this audio response + will + + no longer be accessible on the server for use in multi-turn + + conversations. + data: + type: string + description: | + Base64 encoded audio bytes generated by the model, in the format + specified in the request. + transcript: + type: string + description: Transcript of the audio generated by the model. + required: + - role + - content + - refusal + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + ChatCompletionStreamOptions: + description: > + Options for streaming response. Only set this when you set `stream: + true`. + type: object + nullable: true + default: null + properties: + include_usage: + type: boolean + description: > + If set, an additional chunk will be streamed before the `data: + [DONE]` message. The `usage` field on this chunk shows the token + usage statistics for the entire request, and the `choices` field + will always be an empty array. All other chunks will also include a + `usage` field, but with a null value. + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: Deprecated and replaced by `tool_calls`. The name and arguments of + a function that should be called, as generated by the model. + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + name: + type: string + description: The name of the function to call. + tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: + type: string + enum: + - system + - user + - assistant + - tool + description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + ChatCompletionTokenLogprob: + type: object + properties: + token: &a1 + description: The token. + type: string + logprob: &a2 + description: The log probability of this token, if it is within the top 20 most + likely tokens. Otherwise, the value `-9999.0` is used to signify + that the token is very unlikely. 
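# Editor's note: a sketch of `ChatCompletionStreamOptions` and the streamed
# `ChatCompletionStreamResponseDelta` chunks above, assuming the official `openai`
# Python SDK. With `include_usage`, only the final chunk carries a non-null `usage`.
#
#   from openai import OpenAI
#   client = OpenAI()
#   stream = client.chat.completions.create(
#       model="gpt-4o-mini",
#       messages=[{"role": "user", "content": "Hello!"}],
#       stream=True,
#       stream_options={"include_usage": True},
#   )
#   for chunk in stream:
#       if chunk.choices and chunk.choices[0].delta.content:
#           print(chunk.choices[0].delta.content, end="")
#       if chunk.usage:
#           print("\ntotal tokens:", chunk.usage.total_tokens)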
+ type: number + bytes: &a3 + description: A list of integers representing the UTF-8 bytes representation of + the token. Useful in instances where characters are represented by + multiple tokens and their byte representations must be combined to + generate the correct text representation. Can be `null` if there is + no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this + token position. In rare cases, there may be fewer than the number of + requested `top_logprobs` returned. + type: array + items: type: object properties: - type: - enum: - - session - - api_key - type: string - description: The type of actor. Is either `session` or `api_key`. - session: - $ref: '#/components/schemas/AuditLogActorSession' - api_key: - $ref: '#/components/schemas/AuditLogActorApiKey' - description: The actor who performed the audit logged action. - AuditLogEventType: - enum: - - api_key.created - - api_key.updated - - api_key.deleted - - invite.sent - - invite.accepted - - invite.deleted - - login.succeeded - - login.failed - - logout.succeeded - - logout.failed - - organization.updated - - project.created - - project.updated - - project.archived - - service_account.created - - service_account.updated - - service_account.deleted - - user.added - - user.updated - - user.deleted - type: string - description: The event type. - x-oaiExpandable: true - AuditLog: + token: *a1 + logprob: *a2 + bytes: *a3 required: - - id - - type - - effective_at - - actor - type: object - properties: - id: - type: string - description: The ID of this log. - type: - $ref: '#/components/schemas/AuditLogEventType' - effective_at: - type: integer - description: The Unix timestamp (in seconds) of the event. - project: - type: object - properties: - id: - type: string - description: The project ID. - name: - type: string - description: The project title. - description: The project that the action was scoped to. Absent for actions not scoped to projects. - actor: - $ref: '#/components/schemas/AuditLogActor' - api_key.created: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - data: - type: object - properties: - scopes: - type: array - items: - type: string - description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' - description: The payload used to create the API key. - description: The details for events with this `type`. - api_key.updated: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - changes_requested: - type: object - properties: - scopes: - type: array - items: - type: string - description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' - description: The payload used to update the API key. - description: The details for events with this `type`. - api_key.deleted: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - description: The details for events with this `type`. - invite.sent: - type: object - properties: - id: - type: string - description: The ID of the invite. - data: - type: object - properties: - email: - type: string - description: The email invited to the organization. - role: - type: string - description: The role the email was invited to be. Is either `owner` or `member`. - description: The payload used to create the invite. - description: The details for events with this `type`. 
- invite.accepted: - type: object - properties: - id: - type: string - description: The ID of the invite. - description: The details for events with this `type`. - invite.deleted: - type: object - properties: - id: - type: string - description: The ID of the invite. - description: The details for events with this `type`. - login.failed: - type: object - properties: - error_code: - type: string - description: The error code of the failure. - error_message: - type: string - description: The error message of the failure. - description: The details for events with this `type`. - logout.failed: - type: object - properties: - error_code: - type: string - description: The error code of the failure. - error_message: - type: string - description: The error message of the failure. - description: The details for events with this `type`. - organization.updated: - type: object - properties: - id: - type: string - description: The organization ID. - changes_requested: - type: object - properties: - title: - type: string - description: The organization title. - description: - type: string - description: The organization description. - name: - type: string - description: The organization name. - settings: - type: object - properties: - threads_ui_visibility: - type: string - description: 'Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.' - usage_dashboard_visibility: - type: string - description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. - description: The payload used to update the organization settings. - description: The details for events with this `type`. - project.created: - type: object - properties: - id: - type: string - description: The project ID. - data: - type: object - properties: - name: - type: string - description: The project name. - title: - type: string - description: The title of the project as seen on the dashboard. - description: The payload used to create the project. - description: The details for events with this `type`. - project.updated: - type: object - properties: - id: - type: string - description: The project ID. - changes_requested: - type: object - properties: - title: - type: string - description: The title of the project as seen on the dashboard. - description: The payload used to update the project. - description: The details for events with this `type`. - project.archived: - type: object - properties: - id: - type: string - description: The project ID. - description: The details for events with this `type`. - service_account.created: - type: object - properties: - id: - type: string - description: The service account ID. - data: - type: object - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - description: The payload used to create the service account. - description: The details for events with this `type`. - service_account.updated: - type: object - properties: - id: - type: string - description: The service account ID. - changes_requested: - type: object - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - description: The payload used to updated the service account. - description: The details for events with this `type`. - service_account.deleted: - type: object - properties: - id: - type: string - description: The service account ID. 
- description: The details for events with this `type`. - user.added: + - token + - logprob + - bytes + required: + - token + - logprob + - bytes + - top_logprobs + ChatCompletionTool: + type: object + properties: + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + ChatCompletionToolChoiceOption: + description: > + Controls which (if any) tool is called by the model. + + `none` means the model will not call any tool and instead generates a + message. + + `auto` means the model can pick between generating a message or calling + one or more tools. + + `required` means the model must call one or more tools. + + Specifying a particular tool via `{"type": "function", "function": + {"name": "my_function"}}` forces the model to call that tool. + + + `none` is the default when no tools are present. `auto` is the default + if tools are present. + oneOf: + - type: string + description: > + `none` means the model will not call any tool and instead generates + a message. `auto` means the model can pick between generating a + message or calling one or more tools. `required` means the model + must call one or more tools. + enum: + - none + - auto + - required + - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + x-oaiExpandable: true + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will + use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + CompleteUploadRequest: + type: object + additionalProperties: false + properties: + part_ids: + type: array + description: | + The ordered list of Part IDs. + items: + type: string + md5: + description: > + The optional md5 checksum for the file contents to verify if the + bytes uploaded matches what you expect. + type: string + required: + - part_ids + CompletionUsage: + type: object + description: Usage statistics for the completion request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + type: object + description: Breakdown of tokens used in a completion. + properties: + accepted_prediction_tokens: + type: integer + description: | + When using Predicted Outputs, the number of tokens in the + prediction that appeared in the completion. + audio_tokens: + type: integer + description: Audio input tokens generated by the model. + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. + rejected_prediction_tokens: + type: integer + description: > + When using Predicted Outputs, the number of tokens in the + + prediction that did not appear in the completion. However, like + + reasoning tokens, these tokens are still counted in the total + + completion tokens for purposes of billing, output, and context + window + + limits. + prompt_tokens_details: + type: object + description: Breakdown of tokens used in the prompt. + properties: + audio_tokens: + type: integer + description: Audio input tokens present in the prompt. 
+            cached_tokens:
+              type: integer
+              description: Cached tokens present in the prompt.
+      required:
+        - prompt_tokens
+        - completion_tokens
+        - total_tokens
+    CostsResult:
+      type: object
+      description: The aggregated costs details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.costs.result
+        amount:
+          type: object
+          description: The monetary value in its associated currency.
+          properties:
+            value:
+              type: number
+              description: The numeric value of the cost.
+            currency:
+              type: string
+              description: Lowercase ISO-4217 currency e.g. "usd"
+        line_item:
+          type: string
+          description: When `group_by=line_item`, this field provides the line item of the
+            grouped costs result.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped costs result.
+      required:
+        - object
+      x-oaiMeta:
+        name: Costs object
+        example: |
+          {
+              "object": "organization.costs.result",
+              "amount": {
+                "value": 0.06,
+                "currency": "usd"
+              },
+              "line_item": "Image models",
+              "project_id": "proj_abc"
+          }
+    CreateAssistantRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        model:
+          description: >
+            ID of the model to use. You can use the [List
+            models](/docs/api-reference/models/list) API to see all of your
+            available models, or see our [Model overview](/docs/models) for
+            descriptions of them.
+          example: gpt-4o
+          anyOf:
+            - type: string
+            - type: string
+              enum:
+                - gpt-4o
+                - gpt-4o-2024-11-20
+                - gpt-4o-2024-08-06
+                - gpt-4o-2024-05-13
+                - gpt-4o-mini
+                - gpt-4o-mini-2024-07-18
+                - gpt-4-turbo
+                - gpt-4-turbo-2024-04-09
+                - gpt-4-0125-preview
+                - gpt-4-turbo-preview
+                - gpt-4-1106-preview
+                - gpt-4-vision-preview
+                - gpt-4
+                - gpt-4-0314
+                - gpt-4-0613
+                - gpt-4-32k
+                - gpt-4-32k-0314
+                - gpt-4-32k-0613
+                - gpt-3.5-turbo
+                - gpt-3.5-turbo-16k
+                - gpt-3.5-turbo-0613
+                - gpt-3.5-turbo-1106
+                - gpt-3.5-turbo-0125
+                - gpt-3.5-turbo-16k-0613
+          x-oaiTypeLabel: string
+        name:
+          description: |
+            The name of the assistant. The maximum length is 256 characters.
+          type: string
+          nullable: true
+          maxLength: 256
+        description:
+          description: >
+            The description of the assistant. The maximum length is 512
+            characters.
+          type: string
+          nullable: true
+          maxLength: 512
+        instructions:
+          description: >
+            The system instructions that the assistant uses. The maximum length
+            is 256,000 characters.
+          type: string
+          nullable: true
+          maxLength: 256000
+        tools:
+          description: >
+            A list of tools enabled on the assistant. There can be a maximum of
+            128 tools per assistant. Tools can be of types `code_interpreter`,
+            `file_search`, or `function`.
+          default: []
+          type: array
+          maxItems: 128
+          items:
+            oneOf:
+              - $ref: "#/components/schemas/AssistantToolsCode"
+              - $ref: "#/components/schemas/AssistantToolsFileSearch"
+              - $ref: "#/components/schemas/AssistantToolsFunction"
+            x-oaiExpandable: true
+        tool_resources:
+          type: object
+          description: >
+            A set of resources that are used by the assistant's tools. The
+            resources are specific to the type of tool. For example, the
+            `code_interpreter` tool requires a list of file IDs, while the
+            `file_search` tool requires a list of vector store IDs.
+          properties:
+            code_interpreter:
+              type: object
+              properties:
+                file_ids:
+                  type: array
+                  description: >
+                    A list of [file](/docs/api-reference/files) IDs made
+                    available to the `code_interpreter` tool. There can be a
+                    maximum of 20 files associated with the tool.
+ default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this assistant. There can be a maximum of 1 + vector store attached to the assistant. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: > + A helper to create a [vector + store](/docs/api-reference/vector-stores/object) with + file_ids and attach it to this assistant. There can be a + maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: type: object properties: - id: - type: string - description: The user ID. - data: - type: object + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs to add + to the vector store. There can be a maximum of 10000 + files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will + use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a + `max_chunk_size_tokens` of `800` and + `chunk_overlap_tokens` of `400`. + additionalProperties: false properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - description: The payload used to add the user to the project. - description: The details for events with this `type`. - user.updated: - type: object - properties: - id: - type: string - description: The project ID. - changes_requested: - type: object + type: + type: string + description: Always `auto`. + enum: + - auto + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - description: The payload used to update the user. - description: The details for events with this `type`. - user.deleted: - type: object - properties: - id: - type: string - description: The user ID. - description: The details for events with this `type`. - description: A log of a user action or configuration change within this organization. - x-oaiMeta: - name: The audit log object - example: "{\n \"id\": \"req_xxx_20240101\",\n \"type\": \"api_key.created\",\n \"effective_at\": 1720804090,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.created\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource.operation\"]\n }\n }\n}\n" - ListAuditLogsResponse: - required: - - object - - data - - first_id - - last_id - - has_more + type: + type: string + description: Always `static`. + enum: + - static + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is + `800`. The minimum value is `100` and the + maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: > + The number of tokens that overlap between + chunks. The default value is `400`. + + + Note that the overlap must not exceed half + of `max_chunk_size_tokens`. 
+ required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: > + Set of 16 key-value pairs that can be attached to a + vector store. This can be useful for storing + additional information about the vector store in a + structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 + characters long. + x-oaiTypeLabel: map + oneOf: + - required: + - vector_store_ids + - required: + - vector_stores + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - model + CreateChatCompletionFunctionResponse: + type: object + description: Represents a chat completion response returned by model, based on + the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is + greater than 1. + items: type: object - properties: - object: - enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/AuditLog' - first_id: - type: string - example: audit_log-defb456h8dks - last_id: - type: string - example: audit_log-hnbkd8s93s - has_more: - type: boolean - Invite: required: - - object - - id - - email - - role - - status - - invited_at - - expires_at - type: object + - finish_reason + - index + - message + - logprobs properties: - object: - enum: - - organization.invite - type: string - description: 'The object type, which is always `organization.invite`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - email: - type: string - description: The email address of the individual to whom the invite was sent - role: - enum: - - owner - - reader - type: string - description: '`owner` or `reader`' - status: - enum: - - accepted - - expired - - pending - type: string - description: '`accepted`,`expired`, or `pending`' - invited_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was sent. - expires_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite expires. - accepted_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was accepted. - description: Represents an individual `invite` to the organization. 
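Not part of the spec diff itself, but a minimal usage sketch of the `CreateAssistantRequest` schema added above (model, instructions, the `file_search` tool, and `tool_resources`), assuming the official `openai` Python SDK (v1.x) and a placeholder vector store ID:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Create an assistant that can search an already-created vector store.
# "vs_placeholder123" is a stand-in for a real vector store ID.
assistant = client.beta.assistants.create(
    model="gpt-4o",
    name="Docs helper",
    instructions="Answer questions using the attached files.",
    tools=[{"type": "file_search"}],
    tool_resources={"file_search": {"vector_store_ids": ["vs_placeholder123"]}},
    temperature=0.2,
)
print(assistant.id)
```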
- x-oaiMeta: - name: The invite object - example: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" - InviteListResponse: - required: - - object - - data + finish_reason: + type: string + description: > + The reason the model stopped generating tokens. This will be + `stop` if the model hit a natural stop point or a provided + stop sequence, `length` if the maximum number of tokens + specified in the request was reached, `content_filter` if + content was omitted due to a flag from our content filters, or + `function_call` if the model called a function. + enum: + - stop + - length + - function_call + - content_filter + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was + created. + model: + type: string + description: The model used for the chat completion. + system_fingerprint: + type: string + description: > + This fingerprint represents the backend configuration that the model + runs with. + + + Can be used in conjunction with the `seed` request parameter to + understand when backend changes have been made that might impact + determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: + - chat.completion + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: | + { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1699896916, + "model": "gpt-4o-mini", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\n\"location\": \"Boston, MA\"\n}" + } + } + ] + }, + "logprobs": null, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 82, + "completion_tokens": 17, + "total_tokens": 99, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + CreateChatCompletionImageResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned + by model, based on the provided input. + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: > + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nThis image shows a wooden boardwalk extending through a lush green marshland.", + }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + CreateChatCompletionRequest: + type: object + properties: + messages: + description: > + A list of messages comprising the conversation so far. 
Depending on + the + + [model](/docs/models) you use, different message types (modalities) + are + + supported, like [text](/docs/guides/text-generation), + + [images](/docs/guides/vision), and [audio](/docs/guides/audio). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint + compatibility](/docs/models#model-endpoint-compatibility) table for + details on which models work with the Chat API. + example: gpt-4o + anyOf: + - type: string + - type: string + enum: + - o1-preview + - o1-preview-2024-09-12 + - o1-mini + - o1-mini-2024-09-12 + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-realtime-preview + - gpt-4o-realtime-preview-2024-10-01 + - gpt-4o-audio-preview + - gpt-4o-audio-preview-2024-10-01 + - chatgpt-4o-latest + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0301 + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 + x-oaiTypeLabel: string + store: + type: boolean + default: false + nullable: true + description: > + Whether or not to store the output of this chat completion request + + for use in our [model distillation](/docs/guides/distillation) or + [evals](/docs/guides/evals) products. + metadata: + type: object + nullable: true + description: | + Developer-defined tags and values used for filtering completions + in the [dashboard](https://platform.openai.com/chat-completions). + additionalProperties: + type: string + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: > + Number between -2.0 and 2.0. Positive values penalize new tokens + based on their existing frequency in the text so far, decreasing the + model's likelihood to repeat the same line verbatim. + + + [See more information about frequency and presence + penalties.](/docs/guides/text-generation) + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: > + Modify the likelihood of specified tokens appearing in the + completion. + + + Accepts a JSON object that maps tokens (specified by their token ID + in the tokenizer) to an associated bias value from -100 to 100. + Mathematically, the bias is added to the logits generated by the + model prior to sampling. The exact effect will vary per model, but + values between -1 and 1 should decrease or increase likelihood of + selection; values like -100 or 100 should result in a ban or + exclusive selection of the relevant token. + logprobs: + description: Whether to return log probabilities of the output tokens or not. If + true, returns the log probabilities of each output token returned in + the `content` of `message`. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 20 specifying the number of most likely + tokens to return at each token position, each with an associated log + probability. `logprobs` must be set to `true` if this parameter is + used. 
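As an illustrative sketch (not normative) of the `logprobs` and `top_logprobs` request parameters just described, together with the `ChatCompletionTokenLogprob` fields (`token`, `logprob`, `bytes`, `top_logprobs`) defined earlier in this diff, assuming the official `openai` Python SDK:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello"}],
    logprobs=True,    # return log probabilities for each output token
    top_logprobs=3,   # up to 3 alternatives per position; requires logprobs=True
)

for tok in completion.choices[0].logprobs.content:
    # `bytes` can be None when a token has no byte representation.
    print(tok.token, round(tok.logprob, 4), tok.bytes)
    for alt in tok.top_logprobs:
        print("  alt:", alt.token, round(alt.logprob, 4))
```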
+ type: integer + minimum: 0 + maximum: 20 + nullable: true + max_tokens: + description: > + The maximum number of [tokens](/tokenizer) that can be generated in + the chat completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + + This value is now deprecated in favor of `max_completion_tokens`, + and is not compatible with [o1 series + models](/docs/guides/reasoning). + type: integer + nullable: true + deprecated: true + max_completion_tokens: + description: > + An upper bound for the number of tokens that can be generated for a + completion, including visible output tokens and [reasoning + tokens](/docs/guides/reasoning). + type: integer + nullable: true + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input + message. Note that you will be charged based on the number of + generated tokens across all of the choices. Keep `n` as `1` to + minimize costs. + modalities: + $ref: "#/components/schemas/ChatCompletionModalities" + prediction: + nullable: true + x-oaiExpandable: true + description: > + Configuration for a [Predicted + Output](/docs/guides/predicted-outputs), + + which can greatly improve response times when large parts of the + model + + response are known ahead of time. This is most common when you are + + regenerating a file with only minor changes to most of the content. + oneOf: + - $ref: "#/components/schemas/PredictionContent" + audio: + type: object + nullable: true + description: > + Parameters for audio output. Required when audio output is requested + with + + `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + required: + - voice + - format + x-oaiExpandable: true + properties: + voice: + type: string + enum: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + description: > + The voice the model uses to respond. Supported voices are `ash`, + `ballad`, `coral`, `sage`, and `verse` (also supported but not + recommended are `alloy`, `echo`, and `shimmer`; these voices are + less expressive). + format: + type: string + enum: + - wav + - mp3 + - flac + - opus + - pcm16 + description: > + Specifies the output audio format. Must be one of `wav`, `mp3`, + `flac`, + + `opus`, or `pcm16`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: > + Number between -2.0 and 2.0. Positive values penalize new tokens + based on whether they appear in the text so far, increasing the + model's likelihood to talk about new topics. + + + [See more information about frequency and presence + penalties.](/docs/guides/text-generation) + response_format: + description: > + An object specifying the format that the model must output. + Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o + mini](/docs/models#gpt-4o-mini), [GPT-4 + Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo + models newer than `gpt-3.5-turbo-1106`. + + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables + Structured Outputs which ensures the model will match your supplied + JSON schema. Learn more in the [Structured Outputs + guide](/docs/guides/structured-outputs). + + + Setting to `{ "type": "json_object" }` enables JSON mode, which + ensures the message the model generates is valid JSON. + + + **Important:** when using JSON mode, you **must** also instruct the + model to produce JSON yourself via a system or user message. 
Without + this, the model may generate an unending stream of whitespace until + the generation reaches the token limit, resulting in a long-running + and seemingly "stuck" request. Also note that the message content + may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation + exceeded the max context length. + oneOf: + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true + seed: + type: integer + minimum: -9223372036854776000 + maximum: 9223372036854776000 + nullable: true + description: > + This feature is in Beta. + + If specified, our system will make a best effort to sample + deterministically, such that repeated requests with the same `seed` + and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the + `system_fingerprint` response parameter to monitor changes in the + backend. + x-oaiMeta: + beta: true + service_tier: + description: > + Specifies the latency tier to use for processing the request. This + parameter is relevant for customers subscribed to the scale tier + service: + - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` utilized. + type: string + enum: + - auto + - default + nullable: true + default: auto + stop: + description: | + Up to 4 sequences where the API will stop generating further tokens. + default: null + oneOf: + - type: string + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. Tokens + will be sent as data-only [server-sent + events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: + [DONE]` message. [Example Python + code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or `temperature` but not both. 
+ tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are + supported as a tool. Use this to provide a list of functions the + model may generate JSON inputs for. A max of 128 functions are + supported. + items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + function_call: + deprecated: true + description: > + Deprecated in favor of `tool_choice`. + + + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead + generates a message. + + `auto` means the model can pick between generating a message or + calling a function. + + Specifying a particular function via `{"name": "my_function"}` + forces the model to call that function. + + + `none` is the default when no functions are present. `auto` is the + default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead + generates a message. `auto` means the model can pick between + generating a message or calling a function. + enum: + - none + - auto + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true + description: | + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + required: + - model + - messages + CreateChatCompletionResponse: + type: object + description: Represents a chat completion response returned by model, based on + the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is + greater than 1. + items: type: object + required: + - finish_reason + - index + - message + - logprobs properties: - object: - enum: - - list - type: string - description: 'The object type, which is always `list`' - data: + finish_reason: + type: string + description: > + The reason the model stopped generating tokens. This will be + `stop` if the model hit a natural stop point or a provided + stop sequence, + + `length` if the maximum number of tokens specified in the + request was reached, + + `content_filter` if content was omitted due to a flag from our + content filters, + + `tool_calls` if the model called a tool, or `function_call` + (deprecated) if the model called a function. + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. 
type: array items: - $ref: '#/components/schemas/Invite' - first_id: - type: string - description: The first `invite_id` in the retrieved `list` - last_id: - type: string - description: The last `invite_id` in the retrieved `list` - has_more: - type: boolean - description: The `has_more` property is used for pagination to indicate there are additional results. - InviteRequest: - required: - - email - - role - type: object - properties: - email: - type: string - description: Send an email to this address - role: - enum: - - reader - - owner - type: string - description: '`owner` or `reader`' - InviteDeleteResponse: - required: - - object - - id - - deleted - type: object - properties: - object: - enum: - - organization.invite.deleted - type: string - description: 'The object type, which is always `organization.invite.deleted`' - id: - type: string - deleted: - type: boolean - User: - required: - - object - - id - - name - - email - - role - - added_at - type: object - properties: - object: - enum: - - organization.user - type: string - description: 'The object type, which is always `organization.user`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - name: - type: string - description: The name of the user - email: - type: string - description: The email address of the user - role: - enum: - - owner - - reader - type: string - description: '`owner` or `reader`' - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the user was added. - description: Represents an individual `user` within an organization. - x-oaiMeta: - name: The user object - example: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - UserListResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - enum: - - list - type: string - data: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + refusal: + description: A list of message refusal tokens with log probability information. type: array items: - $ref: '#/components/schemas/User' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - UserRoleUpdateRequest: - required: - - role - type: object - properties: - role: - enum: - - owner - - reader - type: string - description: '`owner` or `reader`' - UserDeleteResponse: - required: - - object - - id - - deleted + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + - refusal + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was + created. + model: + type: string + description: The model used for the chat completion. + service_tier: + description: The service tier used for processing the request. This field is + only included if the `service_tier` parameter is specified in the + request. + type: string + enum: + - scale + - default + example: scale + nullable: true + system_fingerprint: + type: string + description: > + This fingerprint represents the backend configuration that the model + runs with. + + + Can be used in conjunction with the `seed` request parameter to + understand when backend changes have been made that might impact + determinism. + object: + type: string + description: The object type, which is always `chat.completion`. 
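As a hedged usage sketch of `ChatCompletionTool`, the `tool_choice` option, and the `tool_calls` finish reason covered above, assuming the official `openai` Python SDK and a hypothetical `get_current_weather` function:

```python
import json

from openai import OpenAI

client = OpenAI()

tools = [{
    "type": "function",
    "function": {
        "name": "get_current_weather",  # hypothetical function, for illustration only
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}]

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "What's the weather in Boston?"}],
    tools=tools,
    tool_choice="auto",  # or {"type": "function", "function": {"name": "get_current_weather"}}
)

choice = completion.choices[0]
if choice.finish_reason == "tool_calls":
    for call in choice.message.tool_calls:
        print(call.function.name, json.loads(call.function.arguments))
```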
+ enum: + - chat.completion + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: | + { + "id": "chatcmpl-123456", + "object": "chat.completion", + "created": 1728933352, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hi there! How can I assist you today?", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 19, + "completion_tokens": 10, + "total_tokens": 29, + "prompt_tokens_details": { + "cached_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "system_fingerprint": "fp_6b68a8204b" + } + CreateChatCompletionStreamResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned + by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the + same ID. + choices: + type: array + description: > + A list of chat completion choices. Can contain more than one + elements if `n` is greater than 1. Can also be empty for the + + last chunk if you set `stream_options: {"include_usage": true}`. + items: type: object - properties: - object: - enum: - - organization.user.deleted - type: string - id: - type: string - deleted: - type: boolean - Project: required: - - id - - object - - name - - created_at - - status - type: object + - delta + - finish_reason + - index properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - object: - enum: - - organization.project - type: string - description: 'The object type, which is always `organization.project`' - name: - type: string - description: The name of the project. This appears in reporting. - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the project was created. - archived_at: - type: integer - description: The Unix timestamp (in seconds) of when the project was archived or `null`. + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + logprobs: + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. + type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" nullable: true - status: - enum: - - active - - archived - type: string - description: '`active` or `archived`' - description: Represents an individual project. - x-oaiMeta: - name: The project object - example: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - ProjectListResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - enum: - - list - type: string - data: + refusal: + description: A list of message refusal tokens with log probability information. 
type: array items: - $ref: '#/components/schemas/Project' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectCreateRequest: - required: - - name - type: object - properties: - name: - type: string - description: 'The friendly name of the project, this name appears in reports.' - ProjectUpdateRequest: - required: - - name - type: object - properties: - name: - type: string - description: 'The updated name of the project, this name appears in reports.' - DefaultProjectErrorResponse: - required: - - code - - message - type: object - properties: - code: - type: integer - message: - type: string - ProjectUser: - required: - - object - - id - - name - - email - - role - - added_at + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + - refusal + finish_reason: + type: string + description: > + The reason the model stopped generating tokens. This will be + `stop` if the model hit a natural stop point or a provided + stop sequence, + + `length` if the maximum number of tokens specified in the + request was reached, + + `content_filter` if content was omitted due to a flag from our + content filters, + + `tool_calls` if the model called a tool, or `function_call` + (deprecated) if the model called a function. + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was + created. Each chunk has the same timestamp. + model: + type: string + description: The model to generate the completion. + service_tier: + description: The service tier used for processing the request. This field is + only included if the `service_tier` parameter is specified in the + request. + type: string + enum: + - scale + - default + example: scale + nullable: true + system_fingerprint: + type: string + description: > + This fingerprint represents the backend configuration that the model + runs with. + + Can be used in conjunction with the `seed` request parameter to + understand when backend changes have been made that might impact + determinism. + object: + type: string + description: The object type, which is always `chat.completion.chunk`. + enum: + - chat.completion.chunk + usage: + type: object + nullable: true + description: > + An optional field that will only be present when you set + `stream_options: {"include_usage": true}` in your request. + + When present, it contains a null value except for the last chunk + which contains the token usage statistics for the entire request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). 
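A sketch of consuming the streamed chunks described by `CreateChatCompletionStreamResponse`, including the final usage-only chunk enabled via `stream_options: {"include_usage": true}`; assumes the official `openai` Python SDK:

```python
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Write a haiku about the sea."}],
    stream=True,
    stream_options={"include_usage": True},  # adds a final chunk carrying usage
)

for chunk in stream:
    # The last chunk has an empty `choices` list and carries `usage`.
    if chunk.choices:
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)
    if chunk.usage:
        print("\ntokens used:", chunk.usage.total_tokens)
```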
+ required: + - prompt_tokens + - completion_tokens + - total_tokens + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: > + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + + + .... + + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + CreateCompletionRequest: + type: object + properties: + model: + description: > + ID of the model to use. You can use the [List + models](/docs/api-reference/models/list) API to see all of your + available models, or see our [Model overview](/docs/models) for + descriptions of them. + anyOf: + - type: string + - type: string + enum: + - gpt-3.5-turbo-instruct + - davinci-002 + - babbage-002 + x-oaiTypeLabel: string + prompt: + description: > + The prompt(s) to generate completions for, encoded as a string, + array of strings, array of tokens, or array of token arrays. + + + Note that <|endoftext|> is the document separator that the model + sees during training, so if a prompt is not specified the model will + generate as if from the beginning of a new document. + default: <|endoftext|> + nullable: true + oneOf: + - type: string + default: "" + example: This is a test. + - type: array + items: + type: string + default: "" + example: This is a test. + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: > + Generates `best_of` completions server-side and returns the "best" + (the one with the highest log probability per token). Results cannot + be streamed. + + + When used with `n`, `best_of` controls the number of candidate + completions and `n` specifies how many to return – `best_of` must be + greater than `n`. + + + **Note:** Because this parameter generates many completions, it can + quickly consume your token quota. Use carefully and ensure that you + have reasonable settings for `max_tokens` and `stop`. + echo: + type: boolean + default: false + nullable: true + description: | + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: > + Number between -2.0 and 2.0. Positive values penalize new tokens + based on their existing frequency in the text so far, decreasing the + model's likelihood to repeat the same line verbatim. + + + [See more information about frequency and presence + penalties.](/docs/guides/text-generation) + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: > + Modify the likelihood of specified tokens appearing in the + completion. 
+ + + Accepts a JSON object that maps tokens (specified by their token ID + in the GPT tokenizer) to an associated bias value from -100 to 100. + You can use this [tokenizer tool](/tokenizer?view=bpe) to convert + text to token IDs. Mathematically, the bias is added to the logits + generated by the model prior to sampling. The exact effect will vary + per model, but values between -1 and 1 should decrease or increase + likelihood of selection; values like -100 or 100 should result in a + ban or exclusive selection of the relevant token. + + + As an example, you can pass `{"50256": -100}` to prevent the + <|endoftext|> token from being generated. + logprobs: + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: > + Include the log probabilities on the `logprobs` most likely output + tokens, as well the chosen tokens. For example, if `logprobs` is 5, + the API will return a list of the 5 most likely tokens. The API will + always return the `logprob` of the sampled token, so there may be up + to `logprobs+1` elements in the response. + + + The maximum value for `logprobs` is 5. + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: > + The maximum number of [tokens](/tokenizer) that can be generated in + the completion. + + + The token count of your prompt plus `max_tokens` cannot exceed the + model's context length. [Example Python + code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: > + How many completions to generate for each prompt. + + + **Note:** Because this parameter generates many completions, it can + quickly consume your token quota. Use carefully and ensure that you + have reasonable settings for `max_tokens` and `stop`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: > + Number between -2.0 and 2.0. Positive values penalize new tokens + based on whether they appear in the text so far, increasing the + model's likelihood to talk about new topics. + + + [See more information about frequency and presence + penalties.](/docs/guides/text-generation) + seed: + type: integer + minimum: -9223372036854776000 + maximum: 9223372036854776000 + nullable: true + description: > + If specified, our system will make a best effort to sample + deterministically, such that repeated requests with the same `seed` + and parameters should return the same result. + + + Determinism is not guaranteed, and you should refer to the + `system_fingerprint` response parameter to monitor changes in the + backend. + stop: + description: > + Up to 4 sequences where the API will stop generating further tokens. + The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent + as data-only [server-sent + events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: + [DONE]` message. [Example Python + code](https://cookbook.openai.com/examples/how_to_stream_completions). 
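As an illustrative sketch of the legacy `CreateCompletionRequest` parameters above (`prompt`, `max_tokens`, `logprobs`, `logit_bias`, `stop`), assuming the official `openai` Python SDK; the `{"50256": -100}` bias mirrors the <|endoftext|> example in the `logit_bias` description:

```python
from openai import OpenAI

client = OpenAI()

completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Say this is a test.",
    max_tokens=16,
    logprobs=2,                  # include logprobs for the 2 most likely tokens
    logit_bias={"50256": -100},  # effectively bans the <|endoftext|> token
    stop=["\n"],
)

choice = completion.choices[0]
print(choice.text)
print(choice.finish_reason)      # "stop" or "length"
if choice.logprobs:
    print(choice.logprobs.top_logprobs[:3])
```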
+ type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + suffix: + description: | + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. + default: null + nullable: true + type: string + example: test. + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or `temperature` but not both. + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - model + - prompt + CreateCompletionResponse: + type: object + description: > + Represents a completion response from the API. Note: both the streamed + and non-streamed response objects share the same shape (unlike the chat + endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input + prompt. + items: type: object - properties: - object: - enum: - - organization.project.user - type: string - description: 'The object type, which is always `organization.project.user`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - name: - type: string - description: The name of the user - email: - type: string - description: The email address of the user - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the project was added. - description: Represents an individual user in a project. - x-oaiMeta: - name: The project user object - example: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - ProjectUserListResponse: required: - - object - - data - - first_id - - last_id - - has_more - type: object + - finish_reason + - index + - logprobs + - text properties: - object: - type: string - data: + finish_reason: + type: string + description: > + The reason the model stopped generating tokens. This will be + `stop` if the model hit a natural stop point or a provided + stop sequence, + + `length` if the maximum number of tokens specified in the + request was reached, + + or `content_filter` if content was omitted due to a flag from + our content filters. 
+ enum: + - stop + - length + - content_filter + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: type: array items: - $ref: '#/components/schemas/ProjectUser' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectUserCreateRequest: - required: - - user_id - - role - type: object - properties: - user_id: - type: string - description: The ID of the user. - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - ProjectUserUpdateRequest: - required: - - role + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + system_fingerprint: + type: string + description: > + This fingerprint represents the backend configuration that the model + runs with. + + + Can be used in conjunction with the `seed` request parameter to + understand when backend changes have been made that might impact + determinism. + object: + type: string + description: The object type, which is always "text_completion" + enum: + - text_completion + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-4-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + CreateEmbeddingRequest: + type: object + additionalProperties: false + properties: + input: + description: > + Input text to embed, encoded as a string or array of tokens. To + embed multiple inputs in a single request, pass an array of strings + or array of token arrays. The input must not exceed the max input + tokens for the model (8192 tokens for `text-embedding-ada-002`), + cannot be an empty string, and any array must be 2048 dimensions or + less. [Example Python + code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. + example: The quick brown fox jumped over the lazy dog + oneOf: + - type: string + title: string + description: The string that will be turned into an embedding. + default: "" + example: This is a test. + - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: string + default: "" + example: "['This is a test.']" + - type: array + title: array + description: The array of integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + title: array + description: The array of arrays containing integers that will be turned into an + embedding. + minItems: 1 + maxItems: 2048 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true + model: + description: > + ID of the model to use. 
You can use the [List + models](/docs/api-reference/models/list) API to see all of your + available models, or see our [Model overview](/docs/models) for + descriptions of them. + example: text-embedding-3-small + anyOf: + - type: string + - type: string + enum: + - text-embedding-ada-002 + - text-embedding-3-small + - text-embedding-3-large + x-oaiTypeLabel: string + encoding_format: + description: The format to return the embeddings in. Can be either `float` or + [`base64`](https://pypi.org/project/pybase64/). + example: float + default: float + type: string + enum: + - float + - base64 + dimensions: + description: > + The number of dimensions the resulting output embeddings should + have. Only supported in `text-embedding-3` and later models. + type: integer + minimum: 1 + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - model + - input + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "list". + enum: + - list + usage: + type: object + description: The usage information for the request. + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + required: + - object + - model + - data + - usage + CreateFileRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The File object (not file name) to be uploaded. + type: string + format: binary + purpose: + description: > + The intended purpose of the uploaded file. + + + Use "assistants" for [Assistants](/docs/api-reference/assistants) + and [Message](/docs/api-reference/messages) files, "vision" for + Assistants image file inputs, "batch" for [Batch + API](/docs/guides/batch), and "fine-tune" for + [Fine-tuning](/docs/api-reference/fine-tuning). + type: string + enum: + - assistants + - batch + - fine-tune + - vision + required: + - file + - purpose + CreateFineTuningJobRequest: + type: object + properties: + model: + description: > + The name of the model to fine-tune. You can select one of the + + [supported + models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + example: gpt-4o-mini + anyOf: + - type: string + - type: string + enum: + - babbage-002 + - davinci-002 + - gpt-3.5-turbo + - gpt-4o-mini + x-oaiTypeLabel: string + training_file: + description: > + The ID of an uploaded file that contains training data. + + + See [upload file](/docs/api-reference/files/create) for how to + upload a file. + + + Your dataset must be formatted as a JSONL file. Additionally, you + must upload your file with the purpose `fine-tune`. + + + The contents of the file should differ depending on if the model + uses the [chat](/docs/api-reference/fine-tuning/chat-input) or + [completions](/docs/api-reference/fine-tuning/completions-input) + format. + + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more + details. 
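A minimal sketch tying together `CreateFileRequest` (purpose `fine-tune`) and the `model`/`training_file` fields of `CreateFineTuningJobRequest` shown above, assuming the official `openai` Python SDK and a placeholder local `training.jsonl` file:

```python
from openai import OpenAI

client = OpenAI()

# Upload the JSONL training data with the `fine-tune` purpose.
training_file = client.files.create(
    file=open("training.jsonl", "rb"),  # placeholder path
    purpose="fine-tune",
)

# Start the fine-tuning job on a supported base model.
job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file=training_file.id,
)
print(job.id, job.status)
```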
+ type: string + example: file-abc123 + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: > + Number of examples in each batch. A larger batch size means that + model parameters + + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: + - auto + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: > + Scaling factor for the learning rate. A smaller learning rate + may be useful to avoid + + overfitting. + oneOf: + - type: string + enum: + - auto + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: > + The number of epochs to train the model for. An epoch refers to + one full cycle + + through the training dataset. + oneOf: + - type: string + enum: + - auto + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: > + A string of up to 64 characters that will be added to your + fine-tuned model name. + + + For example, a `suffix` of "custom-model-name" would produce a model + name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 64 + default: null + nullable: true + validation_file: + description: > + The ID of an uploaded file that contains validation data. + + + If you provide this file, the data is used to generate validation + + metrics periodically during fine-tuning. These metrics can be viewed + in + + the fine-tuning results file. + + The same data should not be present in both train and validation + files. + + + Your dataset must be formatted as a JSONL file. You must upload your + file with the purpose `fine-tune`. + + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more + details. + type: string + nullable: true + example: file-abc123 + integrations: + type: array + description: A list of integrations to enable for your fine-tuning job. + nullable: true + items: type: object - properties: - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - ProjectUserDeleteResponse: required: - - object - - id - - deleted - type: object + - type + - wandb properties: - object: + type: + description: > + The type of integration to enable. Currently, only "wandb" + (Weights and Biases) is supported. + oneOf: + - type: string enum: - - organization.project.user.deleted + - wandb + wandb: + type: object + description: > + The settings for your integration with Weights and Biases. + This payload specifies the project that + + metrics will be sent to. Optionally, you can set an explicit + display name for your run, add tags + + to your run, and set a default entity (team, username, etc) to + be associated with your run. + required: + - project + properties: + project: + description: > + The name of the project that the new run will be created + under. + type: string + example: my-wandb-project + name: + description: > + A display name to set for the run. If not set, we will use + the Job ID as the name. + nullable: true type: string - id: + entity: + description: > + The entity to use for the run. This allows you to set the + team or username of the WandB user that you would + + like associated with the run. If not set, the default + entity for the registered WandB API key is used. 
+ nullable: true type: string - deleted: - type: boolean - ProjectServiceAccount: - required: - - object - - id - - name - - role - - created_at + tags: + description: > + A list of tags to be attached to the newly created run. + These tags are passed through directly to WandB. Some + + default tags are generated by OpenAI: "openai/finetune", + "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: custom-tag + seed: + description: > + The seed controls the reproducibility of the job. Passing in the + same seed and job parameters should produce the same results, but + may differ in rare cases. + + If a seed is not specified, one will be generated for you. + type: integer + nullable: true + minimum: 0 + maximum: 2147483647 + example: 42 + required: + - model + - training_file + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and + square. If mask is not provided, image must have transparency, which + will be used as the mask. + type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is + 1000 characters. + type: string + example: A cute baby sea otter wearing a beret + mask: + description: An additional image whose fully transparent areas (e.g. where alpha + is zero) indicate where `image` should be edited. Must be a valid + PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + x-oaiTypeLabel: string + default: dall-e-2 + example: dall-e-2 + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported + at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + default: 1024x1024 + example: 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, + `512x512`, or `1024x1024`. + response_format: + type: string + enum: + - url + - b64_json + default: url + example: url + nullable: true + description: The format in which the generated images are returned. Must be one + of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - prompt + - image + CreateImageRequest: + type: object + properties: + prompt: + description: A text description of the desired image(s). The maximum length is + 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + type: string + example: A cute baby sea otter + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + - dall-e-3 + x-oaiTypeLabel: string + default: dall-e-2 + example: dall-e-3 + nullable: true + description: The model to use for image generation. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For + `dall-e-3`, only `n=1` is supported. 
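+        # A minimal usage sketch (illustrative; not part of the schema): an image
+        # generation request with the official `openai` Python SDK; the prompt and
+        # parameter values are placeholders.
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #   result = client.images.generate(
+        #       model="dall-e-3",
+        #       prompt="A cute baby sea otter wearing a beret",
+        #       n=1,                # dall-e-3 only supports n=1
+        #       size="1024x1024",
+        #   )
+        #   print(result.data[0].url)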
+ quality: + type: string + enum: + - standard + - hd + default: standard + example: standard + description: The quality of the image that will be generated. `hd` creates + images with finer details and greater consistency across the image. + This param is only supported for `dall-e-3`. + response_format: + type: string + enum: + - url + - b64_json + default: url + example: url + nullable: true + description: The format in which the generated images are returned. Must be one + of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 1792x1024 + - 1024x1792 + default: 1024x1024 + example: 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, + `512x512`, or `1024x1024` for `dall-e-2`. Must be one of + `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: + type: string + enum: + - vivid + - natural + default: vivid + example: vivid + nullable: true + description: The style of the generated images. Must be one of `vivid` or + `natural`. Vivid causes the model to lean towards generating + hyper-real and dramatic images. Natural causes the model to produce + more natural, less hyper-real looking images. This param is only + supported for `dall-e-3`. + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - prompt + CreateImageVariationRequest: + type: object + properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid + PNG file, less than 4MB, and square. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + x-oaiTypeLabel: string + default: dall-e-2 + example: dall-e-2 + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported + at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For + `dall-e-3`, only `n=1` is supported. + response_format: + type: string + enum: + - url + - b64_json + default: url + example: url + nullable: true + description: The format in which the generated images are returned. Must be one + of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + default: 1024x1024 + example: 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, + `512x512`, or `1024x1024`. + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - image + CreateMessageRequest: + type: object + additionalProperties: false + required: + - role + - content + properties: + role: + type: string + enum: + - user + - assistant + description: > + The role of the entity that is creating the message. Allowed values + include: + + - `user`: Indicates the message is sent by an actual user and should + be used in most cases to represent user-generated messages. + + - `assistant`: Indicates the message is generated by the assistant. 
+ Use this value to insert messages from the assistant into the + conversation. + content: + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type + `text` or images can be passed with `image_url` or `image_file`. + Image types are only supported on [Vision-compatible + models](/docs/models). + title: Array of content parts + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageRequestContentTextObject" + x-oaiExpandable: true + minItems: 1 + x-oaiExpandable: true + attachments: + type: array + items: type: object properties: - object: - enum: - - organization.project.service_account - type: string - description: 'The object type, which is always `organization.project.service_account`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - name: - type: string - description: The name of the service account - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the service account was created - description: Represents an individual service account in a project. - x-oaiMeta: - name: The project service account object - example: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" - ProjectServiceAccountListResponse: - required: - - object - - data - - first_id - - last_id - - has_more + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they should + be added to. + required: + - file_id + - tools + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + CreateModerationRequest: + type: object + properties: + input: + description: > + Input (or inputs) to classify. Can be a single string, an array of + strings, or + + an array of multi-modal input objects similar to other models. + oneOf: + - type: string + description: A string of text to classify for moderation. + default: "" + example: I want to kill them. + - type: array + description: An array of strings to classify for moderation. + items: + type: string + default: "" + example: I want to kill them. + - type: array + description: An array of multi-modal inputs to the moderation model. + items: + x-oaiExpandable: true + oneOf: + - type: object + description: An object describing an image to classify. + properties: + type: + description: Always `image_url`. + type: string + enum: + - image_url + image_url: + type: object + description: Contains either an image URL or a data URL for a base64 encoded + image. 
+ properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + example: https://example.com/image.jpg + required: + - url + required: + - type + - image_url + - type: object + description: An object describing text to classify. + properties: + type: + description: Always `text`. + type: string + enum: + - text + text: + description: A string of text to classify. + type: string + example: I want to kill them + required: + - type + - text + x-oaiExpandable: true + model: + description: | + The content moderation model you would like to use. Learn more in + [the moderation guide](/docs/guides/moderation), and learn about + available models [here](/docs/models#moderation). + nullable: false + default: omni-moderation-latest + example: omni-moderation-2024-09-26 + anyOf: + - type: string + - type: string + enum: + - omni-moderation-latest + - omni-moderation-2024-09-26 + - text-moderation-latest + - text-moderation-stable + x-oaiTypeLabel: string + required: + - input + CreateModerationResponse: + type: object + description: Represents if a given text input is potentially harmful. + properties: + id: + type: string + description: The unique identifier for the moderation request. + model: + type: string + description: The model used to generate the moderation results. + results: + type: array + description: A list of moderation objects. + items: type: object properties: - object: - enum: - - list - type: string - data: + flagged: + type: boolean + description: Whether any of the below categories are flagged. + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, + gender, ethnicity, religion, nationality, sexual + orientation, disability status, or caste. Hateful content + aimed at non-protected groups (e.g., chess players) is + harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards + the targeted group based on race, gender, ethnicity, + religion, nationality, sexual orientation, disability + status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language + towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm + towards any target. + illicit: + type: boolean + description: Content that includes instructions or advice that facilitate the + planning or execution of wrongdoing, or that gives advice + or instruction on how to commit illicit acts. For example, + "how to shoplift" would fit this category. + illicit/violent: + type: boolean + description: Content that includes instructions or advice that facilitate the + planning or execution of wrongdoing that also includes + violence, or that gives advice or instruction on the + procurement of any weapon. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, + such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or + intend to engage in acts of self-harm, such as suicide, + cutting, and eating disorders. 
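+        # A minimal usage sketch (illustrative; not part of the schema): a moderation
+        # request mixing text and image inputs with the official `openai` Python SDK.
+        # The URL is a placeholder; multi-modal input assumes an omni-moderation model
+        # and a recent SDK version.
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #   result = client.moderations.create(
+        #       model="omni-moderation-latest",
+        #       input=[
+        #           {"type": "text", "text": "...text to classify..."},
+        #           {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}},
+        #       ],
+        #   )
+        #   print(result.results[0].flagged)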
+ self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as + suicide, cutting, and eating disorders, or that gives + instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description + of sexual activity, or that promotes sexual services + (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years + old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic + detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - illicit + - illicit/violent + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by + model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + illicit: + type: number + description: The score for the category 'illicit'. + illicit/violent: + type: number + description: The score for the category 'illicit/violent'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - illicit + - illicit/violent + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_applied_input_types: + type: object + description: A list of the categories along with the input type(s) that the + score applies to. + properties: + hate: type: array + description: The applied input type(s) for the category 'hate'. items: - $ref: '#/components/schemas/ProjectServiceAccount' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectServiceAccountCreateRequest: - required: - - name - type: object - properties: - name: - type: string - description: The name of the service account being created. - ProjectServiceAccountCreateResponse: + type: string + enum: + - text + hate/threatening: + type: array + description: The applied input type(s) for the category 'hate/threatening'. + items: + type: string + enum: + - text + harassment: + type: array + description: The applied input type(s) for the category 'harassment'. 
+ items: + type: string + enum: + - text + harassment/threatening: + type: array + description: The applied input type(s) for the category + 'harassment/threatening'. + items: + type: string + enum: + - text + illicit: + type: array + description: The applied input type(s) for the category 'illicit'. + items: + type: string + enum: + - text + illicit/violent: + type: array + description: The applied input type(s) for the category 'illicit/violent'. + items: + type: string + enum: + - text + self-harm: + type: array + description: The applied input type(s) for the category 'self-harm'. + items: + type: string + enum: + - text + - image + self-harm/intent: + type: array + description: The applied input type(s) for the category 'self-harm/intent'. + items: + type: string + enum: + - text + - image + self-harm/instructions: + type: array + description: The applied input type(s) for the category + 'self-harm/instructions'. + items: + type: string + enum: + - text + - image + sexual: + type: array + description: The applied input type(s) for the category 'sexual'. + items: + type: string + enum: + - text + - image + sexual/minors: + type: array + description: The applied input type(s) for the category 'sexual/minors'. + items: + type: string + enum: + - text + violence: + type: array + description: The applied input type(s) for the category 'violence'. + items: + type: string + enum: + - text + - image + violence/graphic: + type: array + description: The applied input type(s) for the category 'violence/graphic'. + items: + type: string + enum: + - text + - image + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - illicit + - illicit/violent + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic required: - - object - - id - - name - - role - - created_at - - api_key + - flagged + - categories + - category_scores + - category_applied_input_types + required: + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: | + { + "id": "modr-0d9740456c391e43c445bf0f010940c7", + "model": "omni-moderation-latest", + "results": [ + { + "flagged": true, + "categories": { + "harassment": true, + "harassment/threatening": true, + "sexual": false, + "hate": false, + "hate/threatening": false, + "illicit": false, + "illicit/violent": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "self-harm": false, + "sexual/minors": false, + "violence": true, + "violence/graphic": true + }, + "category_scores": { + "harassment": 0.8189693396524255, + "harassment/threatening": 0.804985420696006, + "sexual": 1.573112165348997e-6, + "hate": 0.007562942636942845, + "hate/threatening": 0.004208854591835476, + "illicit": 0.030535955153511665, + "illicit/violent": 0.008925306722380033, + "self-harm/intent": 0.00023023930975076432, + "self-harm/instructions": 0.0002293869201073356, + "self-harm": 0.012598046106750154, + "sexual/minors": 2.212566909570261e-8, + "violence": 0.9999992735124786, + "violence/graphic": 0.843064871157054 + }, + "category_applied_input_types": { + "harassment": [ + "text" + ], + "harassment/threatening": [ + "text" + ], + "sexual": [ + "text", + "image" + ], + "hate": [ + "text" + ], + "hate/threatening": [ + "text" + ], + "illicit": [ + "text" + ], + "illicit/violent": [ + "text" + ], + "self-harm/intent": [ + "text", + "image" + ], + "self-harm/instructions": [ + "text", + "image" + ], + "self-harm": [ + "text", + "image" + ], + "sexual/minors": [ + 
"text" + ], + "violence": [ + "text", + "image" + ], + "violence/graphic": [ + "text", + "image" + ] + } + } + ] + } + CreateRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to + execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to + execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated + with the assistant will be used. + example: gpt-4o + anyOf: + - type: string + - type: string + enum: + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 + x-oaiTypeLabel: string + nullable: true + instructions: + description: Overrides the + [instructions](/docs/api-reference/assistants/createAssistant) of + the assistant. This is useful for modifying the behavior on a + per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for + the run. This is useful for modifying the behavior on a per-run + basis without overriding other instructions. + type: string + nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is + useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. 
+        stream:
+          type: boolean
+          nullable: true
+          description: >
+            If `true`, returns a stream of events that happen during the Run as
+            server-sent events, terminating when the Run enters a terminal state
+            with a `data: [DONE]` message.
+        max_prompt_tokens:
+          type: integer
+          nullable: true
+          description: >
+            The maximum number of prompt tokens that may be used over the course
+            of the run. The run will make a best effort to use only the number
+            of prompt tokens specified, across multiple turns of the run. If the
+            run exceeds the number of prompt tokens specified, the run will end
+            with status `incomplete`. See `incomplete_details` for more info.
+          minimum: 256
+        max_completion_tokens:
+          type: integer
+          nullable: true
+          description: >
+            The maximum number of completion tokens that may be used over the
+            course of the run. The run will make a best effort to use only the
+            number of completion tokens specified, across multiple turns of the
+            run. If the run exceeds the number of completion tokens specified,
+            the run will end with status `incomplete`. See `incomplete_details`
+            for more info.
+          minimum: 256
+        truncation_strategy:
+          $ref: "#/components/schemas/TruncationObject"
+          nullable: true
+        tool_choice:
+          $ref: "#/components/schemas/AssistantsApiToolChoiceOption"
+          nullable: true
+        parallel_tool_calls:
+          $ref: "#/components/schemas/ParallelToolCalls"
+        response_format:
+          $ref: "#/components/schemas/AssistantsApiResponseFormatOption"
+          nullable: true
+      required:
+        - assistant_id
+    CreateSpeechRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        model:
+          description: >
+            One of the available [TTS models](/docs/models#tts): `tts-1` or
+            `tts-1-hd`
+          anyOf:
+            - type: string
+            - type: string
+              enum:
+                - tts-1
+                - tts-1-hd
+          x-oaiTypeLabel: string
+        input:
+          type: string
+          description: The text to generate audio for. The maximum length is 4096
+            characters.
+          maxLength: 4096
+        voice:
+          description: The voice to use when generating the audio. Supported voices are
+            `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of
+            the voices are available in the [Text to speech
+            guide](/docs/guides/text-to-speech#voice-options).
+          type: string
+          enum:
+            - alloy
+            - echo
+            - fable
+            - onyx
+            - nova
+            - shimmer
+        response_format:
+          description: The format to return the audio in. Supported formats are `mp3`,
+            `opus`, `aac`, `flac`, `wav`, and `pcm`.
+          default: mp3
+          type: string
+          enum:
+            - mp3
+            - opus
+            - aac
+            - flac
+            - wav
+            - pcm
+        speed:
+          description: The speed of the generated audio. Select a value from `0.25` to
+            `4.0`. `1.0` is the default.
+          type: number
+          default: 1
+          minimum: 0.25
+          maximum: 4
+      required:
+        - model
+        - input
+        - voice
+    CreateThreadAndRunRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        assistant_id:
+          description: The ID of the [assistant](/docs/api-reference/assistants) to use to
+            execute this run.
+          type: string
+        thread:
+          $ref: "#/components/schemas/CreateThreadRequest"
+          description: If no thread is provided, an empty thread will be created.
+        model:
+          description: The ID of the [Model](/docs/api-reference/models) to be used to
+            execute this run. If a value is provided here, it will override the
+            model associated with the assistant. If not, the model associated
+            with the assistant will be used.
+ example: gpt-4o + anyOf: + - type: string + - type: string + enum: + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 + x-oaiTypeLabel: string + nullable: true + instructions: + description: Override the default system message of the assistant. This is + useful for modifying the behavior on a per-run basis. + type: string + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is + useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + tool_resources: + type: object + description: > + A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the + `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The ID of the [vector + store](/docs/api-reference/vector-stores/object) attached to + this assistant. There can be a maximum of 1 vector store + attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. + stream: + type: boolean + nullable: true + description: > + If `true`, returns a stream of events that happen during the Run as + server-sent events, terminating when the Run enters a terminal state + with a `data: [DONE]` message. 
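+        # A minimal usage sketch (illustrative; not part of the schema): creating a
+        # thread and running it in one call with the official `openai` Python SDK
+        # (Assistants beta); the assistant ID is a placeholder.
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #   run = client.beta.threads.create_and_run(
+        #       assistant_id="asst_abc123",
+        #       thread={"messages": [{"role": "user", "content": "Hello!"}]},
+        #   )
+        #   print(run.thread_id, run.status)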
+ max_prompt_tokens: + type: integer + nullable: true + description: > + The maximum number of prompt tokens that may be used over the course + of the run. The run will make a best effort to use only the number + of prompt tokens specified, across multiple turns of the run. If the + run exceeds the number of prompt tokens specified, the run will end + with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: > + The maximum number of completion tokens that may be used over the + course of the run. The run will make a best effort to use only the + number of completion tokens specified, across multiple turns of the + run. If the run exceeds the number of completion tokens specified, + the run will end with status `incomplete`. See `incomplete_details` + for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - assistant_id + CreateThreadRequest: + type: object + additionalProperties: false + properties: + messages: + description: A list of [messages](/docs/api-reference/messages) to start the + thread with. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + tool_resources: + type: object + description: > + A set of resources that are made available to the assistant's tools + in this thread. The resources are specific to the type of tool. For + example, the `code_interpreter` tool requires a list of file IDs, + while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector + store attached to the thread. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: > + A helper to create a [vector + store](/docs/api-reference/vector-stores/object) with + file_ids and attach it to this thread. There can be a + maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs to add + to the vector store. There can be a maximum of 10000 + files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will + use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a + `max_chunk_size_tokens` of `800` and + `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. 
+ enum: + - auto + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: + - static + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is + `800`. The minimum value is `100` and the + maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: > + The number of tokens that overlap between + chunks. The default value is `400`. + + + Note that the overlap must not exceed half + of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: > + Set of 16 key-value pairs that can be attached to a + vector store. This can be useful for storing + additional information about the vector store in a + structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 + characters long. + x-oaiTypeLabel: map + x-oaiExpandable: true + oneOf: + - required: + - vector_store_ids + - required: + - vector_stores + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + CreateTranscriptionRequest: + type: object + additionalProperties: false + properties: + file: + description: > + The audio file object (not file name) to transcribe, in one of these + formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary + model: + description: > + ID of the model to use. Only `whisper-1` (which is powered by our + open source Whisper V2 model) is currently available. + example: whisper-1 + anyOf: + - type: string + - type: string + enum: + - whisper-1 + x-oaiTypeLabel: string + language: + description: > + The language of the input audio. Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) + format will improve accuracy and latency. + type: string + prompt: + description: > + An optional text to guide the model's style or continue a previous + audio segment. The [prompt](/docs/guides/speech-to-text#prompting) + should match the audio language. + type: string + response_format: + $ref: "#/components/schemas/AudioResponseFormat" + temperature: + description: > + The sampling temperature, between 0 and 1. Higher values like 0.8 + will make the output more random, while lower values like 0.2 will + make it more focused and deterministic. If set to 0, the model will + use [log probability](https://en.wikipedia.org/wiki/Log_probability) + to automatically increase the temperature until certain thresholds + are hit. + type: number + default: 0 + timestamp_granularities[]: + description: > + The timestamp granularities to populate for this transcription. + `response_format` must be set `verbose_json` to use timestamp + granularities. Either or both of these options are supported: + `word`, or `segment`. Note: There is no additional latency for + segment timestamps, but generating word timestamps incurs additional + latency. 
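+        # A minimal usage sketch (illustrative; not part of the schema): requesting
+        # word-level timestamps with the official `openai` Python SDK; the file path
+        # is a placeholder.
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #   transcript = client.audio.transcriptions.create(
+        #       model="whisper-1",
+        #       file=open("speech.mp3", "rb"),
+        #       response_format="verbose_json",
+        #       timestamp_granularities=["word"],
+        #   )
+        #   print(transcript.words)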
+          type: array
+          items:
+            type: string
+            enum:
+              - word
+              - segment
+          default:
+            - segment
+      required:
+        - file
+        - model
+    CreateTranscriptionResponseJson:
+      type: object
+      description: Represents a transcription response returned by the model, based on
+        the provided input.
+      properties:
+        text:
+          type: string
+          description: The transcribed text.
+      required:
+        - text
+      x-oaiMeta:
+        name: The transcription object (JSON)
+        group: audio
+        example: >
+          {
+            "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that."
+          }
+    CreateTranscriptionResponseVerboseJson:
+      type: object
+      description: Represents a verbose JSON transcription response returned by the
+        model, based on the provided input.
+      properties:
+        language:
+          type: string
+          description: The language of the input audio.
+        duration:
+          type: string
+          description: The duration of the input audio.
+        text:
+          type: string
+          description: The transcribed text.
+        words:
+          type: array
+          description: Extracted words and their corresponding timestamps.
+          items:
+            $ref: "#/components/schemas/TranscriptionWord"
+        segments:
+          type: array
+          description: Segments of the transcribed text and their corresponding details.
+          items:
+            $ref: "#/components/schemas/TranscriptionSegment"
+      required:
+        - language
+        - duration
+        - text
+      x-oaiMeta:
+        name: The transcription object (Verbose JSON)
+        group: audio
+        example: >
+          {
+            "task": "transcribe",
+            "language": "english",
+            "duration": 8.470000267028809,
+            "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.",
+            "segments": [
+              {
+                "id": 0,
+                "seek": 0,
+                "start": 0.0,
+                "end": 3.319999933242798,
+                "text": " The beach was a popular spot on a hot summer day.",
+                "tokens": [
+                  50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530
+                ],
+                "temperature": 0.0,
+                "avg_logprob": -0.2860786020755768,
+                "compression_ratio": 1.2363636493682861,
+                "no_speech_prob": 0.00985979475080967
+              },
+              ...
+            ]
+          }
+    CreateTranslationRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        file:
+          description: >
+            The audio file object (not file name) to translate, in one of these
+            formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+          type: string
+          x-oaiTypeLabel: file
+          format: binary
+        model:
+          description: >
+            ID of the model to use. Only `whisper-1` (which is powered by our
+            open source Whisper V2 model) is currently available.
+          example: whisper-1
+          anyOf:
+            - type: string
+            - type: string
+              enum:
+                - whisper-1
+          x-oaiTypeLabel: string
+        prompt:
+          description: >
+            An optional text to guide the model's style or continue a previous
+            audio segment. The [prompt](/docs/guides/speech-to-text#prompting)
+            should be in English.
+          type: string
+        response_format:
+          $ref: "#/components/schemas/AudioResponseFormat"
+        temperature:
+          description: >
+            The sampling temperature, between 0 and 1. Higher values like 0.8
+            will make the output more random, while lower values like 0.2 will
+            make it more focused and deterministic. If set to 0, the model will
+            use [log probability](https://en.wikipedia.org/wiki/Log_probability)
+            to automatically increase the temperature until certain thresholds
+            are hit.
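+        # A minimal usage sketch (illustrative; not part of the schema): translating
+        # an audio file to English with the official `openai` Python SDK; the file
+        # path is a placeholder.
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #   translation = client.audio.translations.create(
+        #       model="whisper-1",
+        #       file=open("german.mp3", "rb"),
+        #   )
+        #   print(translation.text)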
+ type: number + default: 0 + required: + - file + - model + CreateTranslationResponseJson: + type: object + properties: + text: + type: string + required: + - text + CreateTranslationResponseVerboseJson: + type: object + properties: + language: + type: string + description: The language of the output translation (always `english`). + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The translated text. + segments: + type: array + description: Segments of the translated text and their corresponding details. + items: + $ref: "#/components/schemas/TranscriptionSegment" + required: + - language + - duration + - text + CreateUploadRequest: + type: object + additionalProperties: false + properties: + filename: + description: | + The name of the file to upload. + type: string + purpose: + description: > + The intended purpose of the uploaded file. + + + See the [documentation on File + purposes](/docs/api-reference/files/create#files-create-purpose). + type: string + enum: + - assistants + - batch + - fine-tune + - vision + bytes: + description: | + The number of bytes in the file you are uploading. + type: integer + mime_type: + description: > + The MIME type of the file. + + + This must fall within the supported MIME types for your file + purpose. See the supported MIME types for assistants and vision. + type: string + required: + - filename + - purpose + - bytes + - mime_type + CreateVectorStoreFileBatchRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector + store should use. Useful for tools like `file_search` that can + access files. + type: array + minItems: 1 + maxItems: 500 + items: + type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_ids + CreateVectorStoreFileRequest: + type: object + additionalProperties: false + properties: + file_id: + description: A [File](/docs/api-reference/files) ID that the vector store should + use. Useful for tools like `file_search` that can access files. + type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_id + CreateVectorStoreRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector + store should use. Useful for tools like `file_search` that can + access files. + type: array + maxItems: 500 + items: + type: string + name: + description: The name of the vector store. + type: string + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will + use the `auto` strategy. Only applicable if `file_ids` is non-empty. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. 
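+        # A minimal usage sketch (illustrative; not part of the schema): creating a
+        # vector store with the official `openai` Python SDK. The file ID is a
+        # placeholder, and depending on SDK version the namespace may be
+        # `client.vector_stores` rather than `client.beta.vector_stores`.
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #   store = client.beta.vector_stores.create(
+        #       name="Support FAQ",
+        #       file_ids=["file-abc123"],
+        #   )
+        #   print(store.id, store.status)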
+ type: object + x-oaiTypeLabel: map + nullable: true + DefaultProjectErrorResponse: + type: object + properties: + code: + type: integer + message: + type: string + required: + - code + - message + DeleteAssistantResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - assistant.deleted + required: + - id + - object + - deleted + DeleteFileResponse: + type: object + properties: + id: + type: string + object: + type: string + enum: + - file + deleted: + type: boolean + required: + - id + - object + - deleted + DeleteMessageResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - thread.message.deleted + required: + - id + - object + - deleted + DeleteModelResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + required: + - id + - object + - deleted + DeleteThreadResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - thread.deleted + required: + - id + - object + - deleted + DeleteVectorStoreFileResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - vector_store.file.deleted + required: + - id + - object + - deleted + DeleteVectorStoreResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - vector_store.deleted + required: + - id + - object + - deleted + DoneEvent: + type: object + properties: + event: + type: string + enum: + - done + data: + type: string + enum: + - "[DONE]" + required: + - event + - data + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" + Embedding: + type: object + description: | + Represents an embedding vector returned by embedding endpoint. + properties: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array + description: > + The embedding vector, which is a list of floats. The length of + vector depends on the model as listed in the [embedding + guide](/docs/guides/embeddings). + items: + type: number + object: + type: string + description: The object type, which is always "embedding". + enum: + - embedding + required: + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + type: + type: string + nullable: false + required: + - type + - message + - param + - code + ErrorEvent: + type: object + properties: + event: + type: string + enum: + - error + data: + $ref: "#/components/schemas/Error" + required: + - event + - data + description: Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. + This can happen due to an internal server error or a timeout. + x-oaiMeta: + dataDescription: "`data` is an [error](/docs/guides/error-codes#api-errors)" + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error + FileSearchRankingOptions: + title: File search tool call ranking options + type: object + description: > + The ranking options for the file search. 
If not specified, the file + search tool will use the `auto` ranker and a score_threshold of 0. + + + See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + properties: + ranker: + type: string + description: The ranker to use for the file search. If not specified will use + the `auto` ranker. + enum: + - auto + - default_2024_08_21 + score_threshold: + type: number + description: The score threshold for the file search. All values must be a + floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - score_threshold + FineTuneChatCompletionRequestAssistantMessage: + allOf: + - type: object + title: Assistant message + deprecated: false + properties: + weight: + type: integer + enum: + - 0 + - 1 + description: Controls whether the assistant message is trained against (0 or 1) + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + required: + - role + FineTuningIntegration: + type: object + title: Fine-Tuning Job Integration + required: + - type + - wandb + properties: + type: + type: string + description: The type of the integration being enabled for the fine-tuning job + enum: + - wandb + wandb: + type: object + description: > + The settings for your integration with Weights and Biases. This + payload specifies the project that + + metrics will be sent to. Optionally, you can set an explicit display + name for your run, add tags + + to your run, and set a default entity (team, username, etc) to be + associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: my-wandb-project + name: + description: > + A display name to set for the run. If not set, we will use the + Job ID as the name. + nullable: true + type: string + entity: + description: > + The entity to use for the run. This allows you to set the team + or username of the WandB user that you would + + like associated with the run. If not set, the default entity for + the registered WandB API key is used. + nullable: true + type: string + tags: + description: > + A list of tags to be attached to the newly created run. These + tags are passed through directly to WandB. Some + + default tags are generated by OpenAI: "openai/finetune", + "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: custom-tag + FineTuningJob: + type: object + title: FineTuningJob + description: > + The `fine_tuning.job` object represents a fine-tuning job that has been + created through the API. + properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was + created. + error: + type: object + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more + information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or + `validation_file`. This field will be null if the failure was + not parameter-specific. 
+ nullable: true + required: + - code + - message + - param + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. The value + will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was + finished. The value will be null if the fine-tuning job is still + running. + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. See the + [fine-tuning guide](/docs/guides/fine-tuning) for more details. + properties: + n_epochs: + oneOf: + - type: string + enum: + - auto + - type: integer + minimum: 1 + maximum: 50 + default: auto + description: >- + The number of epochs to train the model for. An epoch refers to + one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of + the dataset. If setting the number manually, we support any + number between 1 and 50 epochs. + required: + - n_epochs + model: + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + enum: + - fine_tuning.job + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results file ID(s) for the fine-tuning job. You can + retrieve the results with the [Files + API](/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either + `validating_files`, `queued`, `running`, `succeeded`, `failed`, or + `cancelled`. + enum: + - validating_files + - queued + - running + - succeeded + - failed + - cancelled + trained_tokens: + type: integer + nullable: true + description: The total number of billable tokens processed by this fine-tuning + job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data + with the [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string + nullable: true + description: The file ID used for validation. You can retrieve the validation + results with the [Files + API](/docs/api-reference/files/retrieve-contents). + integrations: + type: array + nullable: true + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 + items: + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true + seed: + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job is + estimated to finish. The value will be null if the fine-tuning job + is not running. 
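+        # A minimal usage sketch (illustrative; not part of the schema): retrieving a
+        # fine-tuning job and reading the fields described above with the official
+        # `openai` Python SDK; the job ID is a placeholder.
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #   job = client.fine_tuning.jobs.retrieve("ftjob-abc123")
+        #   print(job.status, job.trained_tokens, job.fine_tuned_model)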
+ required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id + - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + - seed + x-oaiMeta: + name: The fine-tuning job object + example: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "davinci-002", + "created_at": 1692661014, + "finished_at": 1692661190, + "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", + "organization_id": "org-123", + "result_files": [ + "file-abc123" + ], + "status": "succeeded", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": { + "n_epochs": 4, + "batch_size": 1, + "learning_rate_multiplier": 1.0 + }, + "trained_tokens": 5768, + "integrations": [], + "seed": 0, + "estimated_finish": 0 + } + FineTuningJobCheckpoint: + type: object + title: FineTuningJobCheckpoint + description: > + The `fine_tuning.job.checkpoint` object represents a model checkpoint + for a fine-tuning job that is ready to use. + properties: + id: + type: string + description: The checkpoint identifier, which can be referenced in the API + endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: + type: object + description: Metrics at the step number during the fine-tuning job. + properties: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: + type: string + description: The name of the fine-tuning job that this checkpoint was created + from. + object: + type: string + description: The object type, which is always "fine_tuning.job.checkpoint". 
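+    # Illustrative sketch: listing the `fine_tuning.job.checkpoint` objects described
+    # here for a finished job. Assumes a recent version of the official `openai`
+    # Python SDK; the job ID is a placeholder.
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   for ckpt in client.fine_tuning.jobs.checkpoints.list("ftjob-abc123"):
+    #       print(ckpt.step_number, ckpt.metrics.full_valid_loss,
+    #             ckpt.fine_tuned_model_checkpoint)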
+ enum: + - fine_tuning.job.checkpoint + required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint + - id + - metrics + - object + - step_number + x-oaiMeta: + name: The fine-tuning job checkpoint object + example: > + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", + "created_at": 1712211699, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", + "metrics": { + "step": 88, + "train_loss": 0.478, + "train_mean_token_accuracy": 0.924, + "valid_loss": 10.112, + "valid_mean_token_accuracy": 0.145, + "full_valid_loss": 0.567, + "full_valid_mean_token_accuracy": 0.944 + }, + "step_number": 88 + } + FineTuningJobEvent: + type: object + description: Fine-tuning job event object + properties: + id: + type: string + created_at: + type: integer + level: + type: string + enum: + - info + - warn + - error + message: + type: string + object: + type: string + enum: + - fine_tuning.job.event + required: + - id + - object + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "fine_tuning.job.event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } + FinetuneChatRequestInput: + type: object + description: The per-line training example of a fine-tuning input file for chat models + properties: + messages: + type: array + minItems: 1 + items: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + tools: + type: array + description: A list of tools the model may generate JSON inputs for. + items: + $ref: "#/components/schemas/ChatCompletionTool" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + functions: + deprecated: true + description: A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + x-oaiMeta: + name: Training format for chat models + example: > + { + "messages": [ + { "role": "user", "content": "What is the weather in San Francisco?" }, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_id", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}" + } + } + ] + } + ], + "parallel_tool_calls": false, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and country, eg. San Francisco, USA" + }, + "format": { "type": "string", "enum": ["celsius", "fahrenheit"] } + }, + "required": ["location", "format"] + } + } + } + ] + } + FinetuneCompletionRequestInput: + type: object + description: The per-line training example of a fine-tuning input file for + completions models + properties: + prompt: + type: string + description: The input prompt for this training example. 
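+    # Illustrative sketch: writing per-line chat training examples in the format
+    # documented above and uploading them for fine-tuning. Assumes the official
+    # `openai` Python SDK; the file name and example content are placeholders.
+    #
+    #   import json
+    #   from openai import OpenAI
+    #   examples = [
+    #       {"messages": [
+    #           {"role": "system", "content": "You are a helpful assistant."},
+    #           {"role": "user", "content": "What is 2+2?"},
+    #           {"role": "assistant", "content": "4"},
+    #       ]},
+    #   ]
+    #   with open("train.jsonl", "w") as f:
+    #       for example in examples:
+    #           f.write(json.dumps(example) + "\n")
+    #   client = OpenAI()
+    #   training_file = client.files.create(file=open("train.jsonl", "rb"), purpose="fine-tune")
+    #   print(training_file.id)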
+ completion: + type: string + description: The desired completion for this training example. + x-oaiMeta: + name: Training format for completions models + example: | + { + "prompt": "What is the answer to 2+2", + "completion": "4" + } + FunctionObject: + type: object + properties: + description: + type: string + description: A description of what the function does, used by the model to + choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or + contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the + function call. If set to true, the model will follow the exact + schema defined in the `parameters` field. Only a subset of JSON + Schema is supported when `strict` is `true`. Learn more about + Structured Outputs in the [function calling + guide](docs/guides/function-calling). + required: + - name + FunctionParameters: + type: object + description: >- + The parameters the functions accepts, described as a JSON Schema object. + See the [guide](/docs/guides/function-calling) for examples, and the + [JSON Schema + reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + + Omitting `parameters` defines a function with an empty parameter list. + additionalProperties: true + Image: + type: object + description: Represents the url or the content of an image generated by the + OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if + `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` + (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any + revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } + ImagesResponse: + properties: + created: + type: integer + data: + type: array + items: + $ref: "#/components/schemas/Image" + required: + - created + - data + Invite: + type: object + description: Represents an individual `invite` to the organization. + properties: + object: + type: string + enum: + - organization.invite + description: The object type, which is always `organization.invite` + id: + type: string + description: The identifier, which can be referenced in API endpoints + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + type: string + enum: + - owner + - reader + description: "`owner` or `reader`" + status: + type: string + enum: + - accepted + - expired + - pending + description: "`accepted`,`expired`, or `pending`" + invited_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. 
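+    # Illustrative sketch: passing a `FunctionObject` with `strict: true` as a chat
+    # completion tool. Assumes the official `openai` Python SDK; the function itself
+    # is a placeholder. Note that strict mode requires `additionalProperties: false`
+    # and all properties listed in `required`.
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   tools = [{
+    #       "type": "function",
+    #       "function": {
+    #           "name": "get_current_weather",
+    #           "description": "Get the current weather for a location.",
+    #           "strict": True,
+    #           "parameters": {
+    #               "type": "object",
+    #               "properties": {"location": {"type": "string"}},
+    #               "required": ["location"],
+    #               "additionalProperties": False,
+    #           },
+    #       },
+    #   }]
+    #   completion = client.chat.completions.create(
+    #       model="gpt-4o",
+    #       messages=[{"role": "user", "content": "What is the weather in Paris?"}],
+    #       tools=tools,
+    #   )
+    #   print(completion.choices[0].message.tool_calls)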
+ required: + - object + - id + - email + - role + - status + - invited_at + - expires_at + x-oaiMeta: + name: The invite object + example: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + InviteDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.invite.deleted + description: The object type, which is always `organization.invite.deleted` + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + InviteListResponse: + type: object + properties: + object: + type: string + enum: + - list + description: The object type, which is always `list` + data: + type: array + items: + $ref: "#/components/schemas/Invite" + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there + are additional results. + required: + - object + - data + InviteRequest: + type: object + properties: + email: + type: string + description: Send an email to this address + role: + type: string + enum: + - reader + - owner + description: "`owner` or `reader`" + required: + - email + - role + ListAssistantsResponse: + type: object + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: + type: string + example: asst_abc123 + last_id: + type: string + example: asst_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: > + { + "object": "list", + "data": [ + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698982736, + "name": "Coding Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc456", + "object": "assistant", + "created_at": 1698982718, + "name": "My Assistant", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc789", + "object": "assistant", + "created_at": 1698982643, + "name": null, + "description": null, + "model": "gpt-4o", + "instructions": null, + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + ], + "first_id": "asst_abc123", + "last_id": "asst_abc789", + "has_more": false + } + ListAuditLogsResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/AuditLog" + first_id: + type: string + example: audit_log-defb456h8dks + last_id: + type: string + example: audit_log-hnbkd8s93s + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ListBatchesResponse: + type: object + properties: + data: + type: array + items: + $ref: 
"#/components/schemas/Batch" + first_id: + type: string + example: batch_abc123 + last_id: + type: string + example: batch_abc456 + has_more: + type: boolean + object: + type: string + enum: + - list + required: + - object + - data + - has_more + ListFilesResponse: + type: object + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + first_id: + type: string + example: file-abc123 + last_id: + type: string + example: file-abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListFineTuningJobCheckpointsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobCheckpoint" + object: + type: string + enum: + - list + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean + required: + - object + - data + - has_more + ListFineTuningJobEventsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobEvent" + object: + type: string + enum: + - list + required: + - object + - data + ListMessagesResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/MessageObject" + first_id: + type: string + example: msg_abc123 + last_id: + type: string + example: msg_abc123 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListModelsResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + ListPaginatedFineTuningJobsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: + type: string + enum: + - list + required: + - object + - data + - has_more + ListRunStepsResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/RunStepObject" + first_id: + type: string + example: step_abc123 + last_id: + type: string + example: step_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListRunsResponse: + type: object + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/RunObject" + first_id: + type: string + example: run_abc123 + last_id: + type: string + example: run_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListThreadsResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/ThreadObject" + first_id: + type: string + example: asst_abc123 + last_id: + type: string + example: asst_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListVectorStoreFilesResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreFileObject" + first_id: + type: string + example: file-abc123 + last_id: + type: string + example: file-abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListVectorStoresResponse: + properties: 
+ object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreObject" + first_id: + type: string + example: vs_abc123 + last_id: + type: string + example: vs_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + MessageContentImageFileObject: + title: Image file + type: object + description: References an image [File](/docs/api-reference/files) in the + content of a message. + properties: + type: + description: Always `image_file`. + type: string + enum: + - image_file + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the + message content. Set `purpose="vision"` when uploading the File + if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. + `low` uses fewer tokens, you can opt in to high resolution using + `high`. + enum: + - auto + - low + - high + default: auto + required: + - file_id + required: + - type + - image_file + MessageContentImageUrlObject: + title: Image URL + type: object + description: References an image URL in the content of a message. + properties: + type: + type: string + enum: + - image_url + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: "The external URL of the image, must be a supported image types: + jpeg, jpg, png, gif, webp." + format: uri + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, + you can opt in to high resolution using `high`. Default value is + `auto` + enum: + - auto + - low + - high + default: auto + required: + - url + required: + - type + - image_url + MessageContentRefusalObject: + title: Refusal + type: object + description: The refusal content generated by the assistant. + properties: + type: + description: Always `refusal`. + type: string + enum: + - refusal + refusal: + type: string + nullable: false + required: + - type + - refusal + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from + a specific File associated with the assistant or the message. Generated + when the assistant uses the "file_search" tool to search files. + properties: + type: + description: Always `file_citation`. + type: string + enum: + - file_citation + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_citation + - start_index + - end_index + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the + `code_interpreter` tool to generate a file. + properties: + type: + description: Always `file_path`. + type: string + enum: + - file_path + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. 
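+    # Illustrative sketch: reading the annotation objects above from an assistant
+    # message produced with the `file_search` tool. Assumes the official `openai`
+    # Python SDK; the thread and message IDs are placeholders.
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   message = client.beta.threads.messages.retrieve(
+    #       message_id="msg_abc123", thread_id="thread_abc123"
+    #   )
+    #   for part in message.content:
+    #       if part.type == "text":
+    #           for annotation in part.text.annotations:
+    #               if annotation.type == "file_citation":
+    #                   print(annotation.text, "->", annotation.file_citation.file_id)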
+ type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_path + - start_index + - end_index + MessageContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + type: + description: Always `text`. + type: string + enum: + - text + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - value + - annotations + required: + - type + - text + MessageDeltaContentImageFileObject: + title: Image file + type: object + description: References an image [File](/docs/api-reference/files) in the + content of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_file`. + type: string + enum: + - image_file + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the + message content. Set `purpose="vision"` when uploading the File + if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. + `low` uses fewer tokens, you can opt in to high resolution using + `high`. + enum: + - auto + - low + - high + default: auto + required: + - index + - type + MessageDeltaContentImageUrlObject: + title: Image URL + type: object + description: References an image URL in the content of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_url`. + type: string + enum: + - image_url + image_url: + type: object + properties: + url: + description: "The URL of the image, must be a supported image types: jpeg, jpg, + png, gif, webp." + type: string + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, + you can opt in to high resolution using `high`. + enum: + - auto + - low + - high + default: auto + required: + - index + - type + MessageDeltaContentRefusalObject: + title: Refusal + type: object + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + description: Always `refusal`. + type: string + enum: + - refusal + refusal: + type: string + required: + - index + - type + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from + a specific File associated with the assistant or the message. Generated + when the assistant uses the "file_search" tool to search files. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. + type: string + enum: + - file_citation + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. 
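+    # Illustrative sketch: accumulating the `thread.message.delta` payloads described
+    # here while streaming a run. Assumes the official `openai` Python SDK; the thread
+    # and assistant IDs are placeholders.
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   chunks = []
+    #   stream = client.beta.threads.runs.create(
+    #       thread_id="thread_abc123", assistant_id="asst_abc123", stream=True
+    #   )
+    #   for event in stream:
+    #       if event.event == "thread.message.delta":
+    #           for part in event.data.delta.content or []:
+    #               if part.type == "text" and part.text and part.text.value:
+    #                   chunks.append(part.text.value)
+    #   print("".join(chunks))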
+ type: string + quote: + description: The specific quote in the file. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the + `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. + type: string + enum: + - file_path + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + MessageDeltaContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. + type: string + enum: + - text + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObjec\ + t" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - index + - type + MessageDeltaObject: + type: object + title: Message delta object + description: > + Represents a message delta i.e. any changed fields on a message during + streaming. + properties: + id: + description: The identifier of the message, which can be referenced in API + endpoints. + type: string + object: + description: The object type, which is always `thread.message.delta`. + type: string + enum: + - thread.message.delta + delta: + description: The delta containing the fields that have changed on the Message. + type: object + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: + - user + - assistant + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } + MessageObject: + type: object + title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.message`. + type: string + enum: + - thread.message + created_at: + description: The Unix timestamp (in seconds) for when the message was created. 
+ type: integer + thread_id: + description: The [thread](/docs/api-reference/threads) ID that this message + belongs to. + type: string + status: + description: The status of the message, which can be either `in_progress`, + `incomplete`, or `completed`. + type: string + enum: + - in_progress + - incomplete + - completed + incomplete_details: + description: On an incomplete message, details about why the message is + incomplete. + type: object + properties: + reason: + type: string + description: The reason the message is incomplete. + enum: + - content_filter + - max_tokens + - run_cancelled + - run_expired + - run_failed + nullable: true + required: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. + type: integer + nullable: true + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as + incomplete. + type: integer + nullable: true + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: + - user + - assistant + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the + [assistant](/docs/api-reference/assistants) that authored this + message. + type: string + nullable: true + run_id: + description: The ID of the [run](/docs/api-reference/runs) associated with the + creation of this message. Value is `null` when messages are created + manually using the create message or create thread endpoints. + type: string + nullable: true + attachments: + type: array + items: type: object properties: - object: - enum: - - organization.project.service_account - type: string - id: - type: string - name: - type: string - role: - enum: - - member - type: string - description: Service accounts can only have one role of type `member` - created_at: - type: integer - api_key: - $ref: '#/components/schemas/ProjectServiceAccountApiKey' - ProjectServiceAccountApiKey: - required: - - object - - value - - name - - created_at - - id + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they were + added to. + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. 
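+    # Illustrative sketch: listing the message objects in a thread and inspecting
+    # role, text content, and attachments. Assumes the official `openai` Python SDK;
+    # the thread ID is a placeholder.
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   for message in client.beta.threads.messages.list(thread_id="thread_abc123"):
+    #       text = " ".join(p.text.value for p in message.content if p.type == "text")
+    #       print(message.role, text, message.attachments)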
+ type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - created_at + - thread_id + - status + - incomplete_details + - completed_at + - incomplete_at + - role + - content + - assistant_id + - run_id + - attachments + - metadata + x-oaiMeta: + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "attachments": [], + "metadata": {} + } + MessageRequestContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + type: + description: Always `text`. + type: string + enum: + - text + text: + type: string + description: Text content to be sent to the model + required: + - type + - text + MessageStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: + - thread.message.created + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is + created. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: + - thread.message.in_progress + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) moves + to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: + - thread.message.delta + data: + $ref: "#/components/schemas/MessageDeltaObject" + required: + - event + - data + description: Occurs when parts of a + [Message](/docs/api-reference/messages/object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [message + delta](/docs/api-reference/assistants-streaming/message-delta-obj\ + ect)" + - type: object + properties: + event: + type: string + enum: + - thread.message.completed + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is + completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: + - thread.message.incomplete + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends + before it is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. + properties: + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. + object: + type: string + description: The object type, which is always "model". + enum: + - model + owned_by: + type: string + description: The organization that owns the model. 
+ required: + - id + - object + - created + - owned_by + x-oaiMeta: + name: The model object + example: | + { + "id": "VAR_chat_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + } + ModifyAssistantRequest: + type: object + additionalProperties: false + properties: + model: + description: > + ID of the model to use. You can use the [List + models](/docs/api-reference/models/list) API to see all of your + available models, or see our [Model overview](/docs/models) for + descriptions of them. + anyOf: + - type: string + name: + description: | + The name of the assistant. The maximum length is 256 characters. + type: string + nullable: true + maxLength: 256 + description: + description: > + The description of the assistant. The maximum length is 512 + characters. + type: string + nullable: true + maxLength: 512 + instructions: + description: > + The system instructions that the assistant uses. The maximum length + is 256,000 characters. + type: string + nullable: true + maxLength: 256000 + tools: + description: > + A list of tool enabled on the assistant. There can be a maximum of + 128 tools per assistant. Tools can be of types `code_interpreter`, + `file_search`, or `function`. + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: > + A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the + `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + Overrides the list of [file](/docs/api-reference/files) IDs + made available to the `code_interpreter` tool. There can be + a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + Overrides the [vector + store](/docs/api-reference/vector-stores/object) attached to + this assistant. There can be a maximum of 1 vector store + attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. 
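+    # Illustrative sketch: applying a `ModifyAssistantRequest` like the one defined
+    # here. Assumes the official `openai` Python SDK; the assistant and vector store
+    # IDs are placeholders.
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   assistant = client.beta.assistants.update(
+    #       "asst_abc123",
+    #       instructions="You are a concise coding tutor.",
+    #       tools=[{"type": "file_search"}],
+    #       tool_resources={"file_search": {"vector_store_ids": ["vs_abc123"]}},
+    #       temperature=0.2,
+    #   )
+    #   print(assistant.id, assistant.model)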
+ response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + ModifyMessageRequest: + type: object + additionalProperties: false + properties: + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + ModifyRunRequest: + type: object + additionalProperties: false + properties: + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + ModifyThreadRequest: + type: object + additionalProperties: false + properties: + tool_resources: + type: object + description: > + A set of resources that are made available to the assistant's tools + in this thread. The resources are specific to the type of tool. For + example, the `code_interpreter` tool requires a list of file IDs, + while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector + store attached to the thread. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. + properties: + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. + object: + type: string + description: The object type, which is always `file`. + enum: + - file + purpose: + type: string + description: The intended purpose of the file. Supported values are + `assistants`, `assistants_output`, `batch`, `batch_output`, + `fine-tune`, `fine-tune-results` and `vision`. + enum: + - assistants + - assistants_output + - batch + - batch_output + - fine-tune + - fine-tune-results + - vision + status: + type: string + deprecated: true + description: Deprecated. The current status of the file, which can be either + `uploaded`, `processed`, or `error`. + enum: + - uploaded + - processed + - error + status_details: + type: string + deprecated: true + description: Deprecated. 
For details on why a fine-tuning training file failed + validation, see the `error` field on `fine_tuning.job`. + required: + - id + - object + - bytes + - created_at + - filename + - purpose + - status + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + OtherChunkingStrategyResponseParam: + type: object + title: Other Chunking Strategy + description: This is returned when the chunking strategy is unknown. Typically, + this is because the file was indexed before the `chunking_strategy` + concept was introduced in the API. + additionalProperties: false + properties: + type: + type: string + description: Always `other`. + enum: + - other + required: + - type + ParallelToolCalls: + description: Whether to enable [parallel function + calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + type: boolean + default: true + PredictionContent: + type: object + title: Static Content + description: > + Static predicted output content, such as the content of a text file that + is + + being regenerated. + required: + - type + - content + properties: + type: + type: string + enum: + - content + description: | + The type of the predicted content you want to provide. This type is + currently always `content`. + content: + x-oaiExpandable: true + description: > + The content that should be matched when generating a model response. + + If generated tokens would match this content, the entire model + response + + can be returned much more quickly. + oneOf: + - type: string + title: Text content + description: | + The content used for a Predicted Output. This is often the + text of a file you are regenerating with minor changes. + - type: array + description: An array of content parts with a defined type. Supported options + differ based on the [model](/docs/models) being used to generate + the response. Can contain text inputs. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + minItems: 1 + Project: + type: object + description: Represents an individual project. + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints + object: + type: string + enum: + - organization.project + description: The object type, which is always `organization.project` + name: + type: string + description: The name of the project. This appears in reporting. + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was created. + archived_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) of when the project was archived or + `null`. + status: + type: string + enum: + - active + - archived + description: "`active` or `archived`" + required: + - id + - object + - name + - created_at + - status + x-oaiMeta: + name: The project object + example: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + ProjectApiKey: + type: object + description: Represents an individual API key in a project. 
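+    # Illustrative sketch: supplying the `PredictionContent` defined above to speed up
+    # regeneration of mostly-unchanged text. Assumes a recent version of the official
+    # `openai` Python SDK and a model that supports Predicted Outputs; the file name
+    # is a placeholder.
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   existing_code = open("app.py").read()
+    #   completion = client.chat.completions.create(
+    #       model="gpt-4o",
+    #       messages=[{"role": "user",
+    #                  "content": "Rename the function foo to bar:\n" + existing_code}],
+    #       prediction={"type": "content", "content": existing_code},
+    #   )
+    #   print(completion.choices[0].message.content)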
+ properties: + object: + type: string + enum: + - organization.project.api_key + description: The object type, which is always `organization.project.api_key` + redacted_value: + type: string + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the API key was created + id: + type: string + description: The identifier, which can be referenced in API endpoints + owner: + type: object + properties: + type: + type: string + enum: + - user + - service_account + description: "`user` or `service_account`" + user: + $ref: "#/components/schemas/ProjectUser" + service_account: + $ref: "#/components/schemas/ProjectServiceAccount" + required: + - object + - redacted_value + - name + - created_at + - id + - owner + x-oaiMeta: + name: The project API key object + example: | + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "created_at": 1711471533 + } + } + } + ProjectApiKeyDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.project.api_key.deleted + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + ProjectApiKeyListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/ProjectApiKey" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectCreateRequest: + type: object + properties: + name: + type: string + description: The friendly name of the project, this name appears in reports. + required: + - name + ProjectListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/Project" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectRateLimit: + type: object + description: Represents a project rate limit config. + properties: + object: + type: string + enum: + - project.rate_limit + description: The object type, which is always `project.rate_limit` + id: + type: string + description: The identifier, which can be referenced in API endpoints. + model: + type: string + description: The model this rate limit applies to. + max_requests_per_1_minute: + type: integer + description: The maximum requests per minute. + max_tokens_per_1_minute: + type: integer + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only present for relevant models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only present for relevant + models. + max_requests_per_1_day: + type: integer + description: The maximum requests per day. Only present for relevant models. + batch_1_day_max_input_tokens: + type: integer + description: The maximum batch input tokens per day. Only present for relevant + models. 
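+    # Illustrative sketch: reading and updating the project rate limits described by
+    # this schema using an organization admin API key over plain HTTP. The endpoint
+    # paths, HTTP methods, and IDs below are assumptions inferred from this spec, not
+    # confirmed here; `requests` is a third-party dependency.
+    #
+    #   import os, requests
+    #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
+    #   base = "https://api.openai.com/v1/organization/projects/proj_abc/rate_limits"
+    #   for rl in requests.get(base, headers=headers).json()["data"]:
+    #       print(rl["id"], rl["model"], rl["max_requests_per_1_minute"])
+    #   requests.post(f"{base}/rl_ada", headers=headers,
+    #                 json={"max_requests_per_1_minute": 500})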
+ required: + - object + - id + - model + - max_requests_per_1_minute + - max_tokens_per_1_minute + x-oaiMeta: + name: The project rate limit object + example: | + { + "object": "project.rate_limit", + "id": "rl_ada", + "model": "ada", + "max_requests_per_1_minute": 600, + "max_tokens_per_1_minute": 150000, + "max_images_per_1_minute": 10 + } + ProjectRateLimitListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/ProjectRateLimit" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectRateLimitUpdateRequest: + type: object + properties: + max_requests_per_1_minute: + type: integer + description: The maximum requests per minute. + max_tokens_per_1_minute: + type: integer + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only relevant for certain models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only relevant for certain + models. + max_requests_per_1_day: + type: integer + description: The maximum requests per day. Only relevant for certain models. + batch_1_day_max_input_tokens: + type: integer + description: The maximum batch input tokens per day. Only relevant for certain + models. + ProjectServiceAccount: + type: object + description: Represents an individual service account in a project. + properties: + object: + type: string + enum: + - organization.project.service_account + description: The object type, which is always + `organization.project.service_account` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the service account + role: + type: string + enum: + - owner + - member + description: "`owner` or `member`" + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the service account was + created + required: + - object + - id + - name + - role + - created_at + x-oaiMeta: + name: The project service account object + example: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + ProjectServiceAccountApiKey: + type: object + properties: + object: + type: string + enum: + - organization.project.service_account.api_key + description: The object type, which is always + `organization.project.service_account.api_key` + value: + type: string + name: + type: string + created_at: + type: integer + id: + type: string + required: + - object + - value + - name + - created_at + - id + ProjectServiceAccountCreateRequest: + type: object + properties: + name: + type: string + description: The name of the service account being created. 
+ required: + - name + ProjectServiceAccountCreateResponse: + type: object + properties: + object: + type: string + enum: + - organization.project.service_account + id: + type: string + name: + type: string + role: + type: string + enum: + - member + description: Service accounts can only have one role of type `member` + created_at: + type: integer + api_key: + $ref: "#/components/schemas/ProjectServiceAccountApiKey" + required: + - object + - id + - name + - role + - created_at + - api_key + ProjectServiceAccountDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.project.service_account.deleted + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + ProjectServiceAccountListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/ProjectServiceAccount" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectUpdateRequest: + type: object + properties: + name: + type: string + description: The updated name of the project, this name appears in reports. + required: + - name + ProjectUser: + type: object + description: Represents an individual user in a project. + properties: + object: + type: string + enum: + - organization.project.user + description: The object type, which is always `organization.project.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: + - owner + - member + description: "`owner` or `member`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was added. + required: + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The project user object + example: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ProjectUserCreateRequest: + type: object + properties: + user_id: + type: string + description: The ID of the user. + role: + type: string + enum: + - owner + - member + description: "`owner` or `member`" + required: + - user_id + - role + ProjectUserDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.project.user.deleted + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + ProjectUserListResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: "#/components/schemas/ProjectUser" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectUserUpdateRequest: + type: object + properties: + role: + type: string + enum: + - owner + - member + description: "`owner` or `member`" + required: + - role + RealtimeClientEventConversationItemCreate: + type: object + description: > + Add a new Item to the Conversation's context, including messages, + function + + calls, and function call responses. This event can be used both to + populate a + + "history" of the conversation and to add new items mid-stream, but has + the + + current limitation that it cannot populate assistant audio messages. 
+ + + If successful, the server will respond with a + `conversation.item.created` + + event, otherwise an `error` event will be sent. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - conversation.item.create + description: The event type, must be `conversation.item.create`. + previous_item_id: + type: string + description: > + The ID of the preceding item after which the new item will be + inserted. + + If not set, the new item will be appended to the end of the + conversation. + + If set, it allows an item to be inserted mid-conversation. If the + ID + + cannot be found, an error will be returned and the item will not be + added. + item: + $ref: "#/components/schemas/RealtimeConversationItem" + required: + - type + - item + x-oaiMeta: + name: conversation.item.create + group: realtime + example: | + { + "event_id": "event_345", + "type": "conversation.item.create", + "previous_item_id": null, + "item": { + "id": "msg_001", + "type": "message", + "role": "user", + "content": [ + { + "type": "input_text", + "text": "Hello, how are you?" + } + ] + } + } + RealtimeClientEventConversationItemDelete: + type: object + description: > + Send this event when you want to remove any item from the conversation + + history. The server will respond with a `conversation.item.deleted` + event, + + unless the item does not exist in the conversation history, in which + case the + + server will respond with an error. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - conversation.item.delete + description: The event type, must be `conversation.item.delete`. + item_id: + type: string + description: The ID of the item to delete. + required: + - type + - item_id + x-oaiMeta: + name: conversation.item.delete + group: realtime + example: | + { + "event_id": "event_901", + "type": "conversation.item.delete", + "item_id": "msg_003" + } + RealtimeClientEventConversationItemTruncate: + type: object + description: > + Send this event to truncate a previous assistant message’s audio. The + server + + will produce audio faster than realtime, so this event is useful when + the user + + interrupts to truncate audio that has already been sent to the client + but not + + yet played. This will synchronize the server's understanding of the + audio with + + the client's playback. + + + Truncating audio will delete the server-side text transcript to ensure + there + + is not text in the context that hasn't been heard by the user. + + + If successful, the server will respond with a + `conversation.item.truncated` + + event. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - conversation.item.truncate + description: The event type, must be `conversation.item.truncate`. + item_id: + type: string + description: > + The ID of the assistant message item to truncate. Only assistant + message + + items can be truncated. + content_index: + type: integer + description: The index of the content part to truncate. Set this to 0. + audio_end_ms: + type: integer + description: > + Inclusive duration up to which audio is truncated, in milliseconds. + If + + the audio_end_ms is greater than the actual audio duration, the + server + + will respond with an error. 
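+    # Illustrative sketch: building the Realtime client events defined above as JSON
+    # and sending them over an already-open Realtime WebSocket connection. `ws` is a
+    # placeholder for a connected client from whichever WebSocket library you use.
+    #
+    #   import json
+    #   ws.send(json.dumps({
+    #       "type": "conversation.item.create",
+    #       "item": {
+    #           "type": "message",
+    #           "role": "user",
+    #           "content": [{"type": "input_text", "text": "Hello, how are you?"}],
+    #       },
+    #   }))
+    #   ws.send(json.dumps({"type": "response.create",
+    #                       "response": {"modalities": ["text"]}}))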
+ required: + - type + - item_id + - content_index + - audio_end_ms + x-oaiMeta: + name: conversation.item.truncate + group: realtime + example: | + { + "event_id": "event_678", + "type": "conversation.item.truncate", + "item_id": "msg_002", + "content_index": 0, + "audio_end_ms": 1500 + } + RealtimeClientEventInputAudioBufferAppend: + type: object + description: > + Send this event to append audio bytes to the input audio buffer. The + audio + + buffer is temporary storage you can write to and later commit. In Server + VAD + + mode, the audio buffer is used to detect speech and the server will + decide + + when to commit. When Server VAD is disabled, you must commit the audio + buffer + + manually. + + + The client may choose how much audio to place in each event up to a + maximum + + of 15 MiB, for example streaming smaller chunks from the client may + allow the + + VAD to be more responsive. Unlike made other client events, the server + will + + not send a confirmation response to this event. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - input_audio_buffer.append + description: The event type, must be `input_audio_buffer.append`. + audio: + type: string + description: > + Base64-encoded audio bytes. This must be in the format specified by + the + + `input_audio_format` field in the session configuration. + required: + - type + - audio + x-oaiMeta: + name: input_audio_buffer.append + group: realtime + example: | + { + "event_id": "event_456", + "type": "input_audio_buffer.append", + "audio": "Base64EncodedAudioData" + } + RealtimeClientEventInputAudioBufferClear: + type: object + description: | + Send this event to clear the audio bytes in the buffer. The server will + respond with an `input_audio_buffer.cleared` event. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - input_audio_buffer.clear + description: The event type, must be `input_audio_buffer.clear`. + required: + - type + x-oaiMeta: + name: input_audio_buffer.clear + group: realtime + example: | + { + "event_id": "event_012", + "type": "input_audio_buffer.clear" + } + RealtimeClientEventInputAudioBufferCommit: + type: object + description: > + Send this event to commit the user input audio buffer, which will create + a + + new user message item in the conversation. This event will produce an + error + + if the input audio buffer is empty. When in Server VAD mode, the client + does + + not need to send this event, the server will commit the audio buffer + + automatically. + + + Committing the input audio buffer will trigger input audio + transcription + + (if enabled in session configuration), but it will not create a + response + + from the model. The server will respond with an + `input_audio_buffer.committed` + + event. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - input_audio_buffer.commit + description: The event type, must be `input_audio_buffer.commit`. + required: + - type + x-oaiMeta: + name: input_audio_buffer.commit + group: realtime + example: | + { + "event_id": "event_789", + "type": "input_audio_buffer.commit" + } + RealtimeClientEventResponseCancel: + type: object + description: > + Send this event to cancel an in-progress response. 
The server will + respond + + with a `response.cancelled` event or an error if there is no response + to + + cancel. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - response.cancel + description: The event type, must be `response.cancel`. + required: + - type + x-oaiMeta: + name: response.cancel + group: realtime + example: | + { + "event_id": "event_567", + "type": "response.cancel" + } + RealtimeClientEventResponseCreate: + type: object + description: > + This event instructs the server to create a Response, which means + triggering + + model inference. When in Server VAD mode, the server will create + Responses + + automatically. + + + A Response will include at least one Item, and may have two, in which + case + + the second will be a function call. These Items will be appended to the + + conversation history. + + + The server will respond with a `response.created` event, events for + Items + + and content created, and finally a `response.done` event to indicate + the + + Response is complete. + + + The `response.create` event includes inference configuration like + + `instructions`, and `temperature`. These fields will override the + Session's + + configuration for this Response only. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - response.create + description: The event type, must be `response.create`. + response: + $ref: "#/components/schemas/RealtimeSession" + required: + - type + - response + x-oaiMeta: + name: response.create + group: realtime + example: | + { + "event_id": "event_234", + "type": "response.create", + "response": { + "modalities": ["text", "audio"], + "instructions": "Please assist the user.", + "voice": "sage", + "output_audio_format": "pcm16", + "tools": [ + { + "type": "function", + "name": "calculate_sum", + "description": "Calculates the sum of two numbers.", + "parameters": { + "type": "object", + "properties": { + "a": { "type": "number" }, + "b": { "type": "number" } + }, + "required": ["a", "b"] + } + } + ], + "tool_choice": "auto", + "temperature": 0.7, + "max_output_tokens": 150 + } + } + RealtimeClientEventSessionUpdate: + type: object + description: > + Send this event to update the session’s default configuration. The + client may + + send this event at any time to update the session configuration, and + any + + field may be updated at any time, except for "voice". The server will + respond + + with a `session.updated` event that shows the full effective + configuration. + + Only fields that are present are updated, thus the correct way to clear + a + + field like "instructions" is to pass an empty string. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - session.update + description: The event type, must be `session.update`. 
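+        # Editor's note (illustrative, not part of the generated spec): because only
+        # the fields present in `session` are updated, clearing a field such as
+        # `instructions` means sending it explicitly as an empty string rather than
+        # omitting it, for example:
+        #
+        #     {
+        #       "type": "session.update",
+        #       "session": { "instructions": "" }
+        #     }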
+        session:
+          $ref: "#/components/schemas/RealtimeSession"
+      required:
+        - type
+        - session
+      x-oaiMeta:
+        name: session.update
+        group: realtime
+        example: |
+          {
+            "event_id": "event_123",
+            "type": "session.update",
+            "session": {
+              "modalities": ["text", "audio"],
+              "instructions": "You are a helpful assistant.",
+              "voice": "sage",
+              "input_audio_format": "pcm16",
+              "output_audio_format": "pcm16",
+              "input_audio_transcription": {
+                "model": "whisper-1"
+              },
+              "turn_detection": {
+                "type": "server_vad",
+                "threshold": 0.5,
+                "prefix_padding_ms": 300,
+                "silence_duration_ms": 500
+              },
+              "tools": [
+                {
+                  "type": "function",
+                  "name": "get_weather",
+                  "description": "Get the current weather...",
+                  "parameters": {
+                    "type": "object",
+                    "properties": {
+                      "location": { "type": "string" }
+                    },
+                    "required": ["location"]
+                  }
+                }
+              ],
+              "tool_choice": "auto",
+              "temperature": 0.8,
+              "max_response_output_tokens": "inf"
+            }
+          }
+    RealtimeConversationItem:
+      type: object
+      x-oaiExpandable: true
+      description: The item to add to the conversation.
+      properties:
+        id:
+          type: string
+          description: >
+            The unique ID of the item; this can be generated by the client to
+            help
+
+            manage server-side context, but is not required because the server
+            will
+
+            generate one if not provided.
+        type:
+          type: string
+          enum:
+            - message
+            - function_call
+            - function_call_output
+          description: >
+            The type of the item (`message`, `function_call`,
+            `function_call_output`).
+        object:
+          type: string
+          enum:
+            - realtime.item
+          description: >
+            Identifier for the API object being returned - always
+            `realtime.item`.
+        status:
+          type: string
+          enum:
+            - completed
+            - incomplete
+          description: >
+            The status of the item (`completed`, `incomplete`). These have no
+            effect
+
+            on the conversation, but are accepted for consistency with the
+
+            `conversation.item.created` event.
+        role:
+          type: string
+          enum:
+            - user
+            - assistant
+            - system
+          description: >
+            The role of the message sender (`user`, `assistant`, `system`),
+            only
+
+            applicable for `message` items.
+        content:
+          type: array
+          x-oaiExpandable: true
+          description: >
+            The content of the message, applicable for `message` items.
+
+            - Message items of role `system` support only `input_text` content
+
+            - Message items of role `user` support `input_text` and
+            `input_audio`
+              content
+            - Message items of role `assistant` support `text` content.
+          items:
             type: object
+            x-oaiExpandable: true
             properties:
-              object:
-                enum:
-                  - organization.project.service_account.api_key
-                type: string
-                description: 'The object type, which is always `organization.project.service_account.api_key`'
-              value:
-                type: string
-              name:
-                type: string
-              created_at:
-                type: integer
-              id:
-                type: string
-          ProjectServiceAccountDeleteResponse:
-            required:
-              - object
-              - id
-              - deleted
+              type:
+                type: string
+                enum:
+                  - input_audio
+                  - input_text
+                  - text
+                description: The content type (`input_text`, `input_audio`, `text`).
+              text:
+                type: string
+                description: >
+                  The text content, used for `input_text` and `text` content
+                  types.
+              audio:
+                type: string
+                description: >
+                  Base64-encoded audio bytes, used for `input_audio` content
+                  type.
+              transcript:
+                type: string
+                description: >
+                  The transcript of the audio, used for `input_audio` content
+                  type.
+        call_id:
+          type: string
+          description: >
+            The ID of the function call (for `function_call` and
+
+            `function_call_output` items). 
If passed on a + `function_call_output` + + item, the server will check that a `function_call` item with the + same + + ID exists in the conversation history. + name: + type: string + description: | + The name of the function being called (for `function_call` items). + arguments: + type: string + description: | + The arguments of the function call (for `function_call` items). + output: + type: string + description: | + The output of the function call (for `function_call_output` items). + RealtimeResponse: + type: object + description: The response resource. + properties: + id: + type: string + description: The unique ID of the response. + object: + type: string + enum: + - realtime.response + description: The object type, must be `realtime.response`. + status: + type: string + enum: + - completed + - cancelled + - failed + - incomplete + description: > + The final status of the response (`completed`, `cancelled`, + `failed`, or + + `incomplete`). + status_details: + type: object + description: Additional details about the status. + properties: + type: + type: string + enum: + - completed + - cancelled + - failed + - incomplete + description: > + The type of error that caused the response to fail, + corresponding + + with the `status` field (`cancelled`, `incomplete`, `failed`). + reason: + type: string + enum: + - turn_detected + - client_cancelled + - max_output_tokens + - content_filter + description: > + The reason the Response did not complete. For a `cancelled` + Response, + + one of `turn_detected` (the server VAD detected a new start of + speech) + + or `client_cancelled` (the client sent a cancel event). For an + + `incomplete` Response, one of `max_output_tokens` or + `content_filter` + + (the server-side safety filter activated and cut off the + response). + error: + type: object + description: | + A description of the error that caused the response to fail, + populated when the `status` is `failed`. + properties: + type: + type: string + description: The type of error. + code: + type: string + description: Error code, if any. + output: + type: array + description: The list of output items generated by the response. + items: + $ref: "#/components/schemas/RealtimeConversationItem" + usage: + type: object + description: > + Usage statistics for the Response, this will correspond to billing. + A + + Realtime API session will maintain a conversation context and append + new + + Items to the Conversation, thus output from previous turns (text + and + + audio tokens) will become the input for later turns. + properties: + total_tokens: + type: integer + description: > + The total number of tokens in the Response including input and + output + + text and audio tokens. + input_tokens: + type: integer + description: > + The number of input tokens used in the Response, including text + and + + audio tokens. + output_tokens: + type: integer + description: > + The number of output tokens sent in the Response, including text + and + + audio tokens. + input_token_details: + type: object + description: Details about the input tokens used in the Response. + properties: + cached_tokens: + type: integer + description: The number of cached tokens used in the Response. + text_tokens: + type: integer + description: The number of text tokens used in the Response. + audio_tokens: + type: integer + description: The number of audio tokens used in the Response. + output_token_details: + type: object + description: Details about the output tokens used in the Response. 
+ properties: + text_tokens: + type: integer + description: The number of text tokens used in the Response. + audio_tokens: + type: integer + description: The number of audio tokens used in the Response. + RealtimeServerEventConversationCreated: + type: object + description: > + Returned when a conversation is created. Emitted right after session + creation. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.created + description: The event type, must be `conversation.created`. + conversation: + type: object + description: The conversation resource. + properties: + id: + type: string + description: The unique ID of the conversation. + object: + type: string + description: The object type, must be `realtime.conversation`. + required: + - event_id + - type + - conversation + x-oaiMeta: + name: conversation.created + group: realtime + example: | + { + "event_id": "event_9101", + "type": "conversation.created", + "conversation": { + "id": "conv_001", + "object": "realtime.conversation" + } + } + RealtimeServerEventConversationItemCreated: + type: object + description: > + Returned when a conversation item is created. There are several + scenarios that + + produce this event: + - The server is generating a Response, which if successful will produce + either one or two Items, which will be of type `message` + (role `assistant`) or type `function_call`. + - The input audio buffer has been committed, either by the client or the + server (in `server_vad` mode). The server will take the content of the + input audio buffer and add it to a new user message Item. + - The client has sent a `conversation.item.create` event to add a new Item + to the Conversation. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.created + description: The event type, must be `conversation.item.created`. + previous_item_id: + type: string + description: > + The ID of the preceding item in the Conversation context, allows + the + + client to understand the order of the conversation. + item: + $ref: "#/components/schemas/RealtimeConversationItem" + required: + - event_id + - type + - previous_item_id + - item + x-oaiMeta: + name: conversation.item.created + group: realtime + example: | + { + "event_id": "event_1920", + "type": "conversation.item.created", + "previous_item_id": "msg_002", + "item": { + "id": "msg_003", + "object": "realtime.item", + "type": "message", + "status": "completed", + "role": "user", + "content": [ + { + "type": "input_audio", + "transcript": "hello how are you", + "audio": "base64encodedaudio==" + } + ] + } + } + RealtimeServerEventConversationItemDeleted: + type: object + description: > + Returned when an item in the conversation is deleted by the client with + a + + `conversation.item.delete` event. This event is used to synchronize the + + server's understanding of the conversation history with the client's + view. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.deleted + description: The event type, must be `conversation.item.deleted`. + item_id: + type: string + description: The ID of the item that was deleted. 
+ required: + - event_id + - type + - item_id + x-oaiMeta: + name: conversation.item.deleted + group: realtime + example: | + { + "event_id": "event_2728", + "type": "conversation.item.deleted", + "item_id": "msg_005" + } + RealtimeServerEventConversationItemInputAudioTranscriptionCompleted: + type: object + description: > + This event is the output of audio transcription for user audio written + to the + + user audio buffer. Transcription begins when the input audio buffer is + + committed by the client or server (in `server_vad` mode). Transcription + runs + + asynchronously with Response creation, so this event may come before or + after + + the Response events. + + + Realtime API models accept audio natively, and thus input transcription + is a + + separate process run on a separate ASR (Automatic Speech Recognition) + model, + + currently always `whisper-1`. Thus the transcript may diverge somewhat + from + + the model's interpretation, and should be treated as a rough guide. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.input_audio_transcription.completed + description: | + The event type, must be + `conversation.item.input_audio_transcription.completed`. + item_id: + type: string + description: The ID of the user message item containing the audio. + content_index: + type: integer + description: The index of the content part containing the audio. + transcript: + type: string + description: The transcribed text. + required: + - event_id + - type + - item_id + - content_index + - transcript + x-oaiMeta: + name: conversation.item.input_audio_transcription.completed + group: realtime + example: | + { + "event_id": "event_2122", + "type": "conversation.item.input_audio_transcription.completed", + "item_id": "msg_003", + "content_index": 0, + "transcript": "Hello, how are you?" + } + RealtimeServerEventConversationItemInputAudioTranscriptionFailed: + type: object + description: > + Returned when input audio transcription is configured, and a + transcription + + request for a user message failed. These events are separate from other + + `error` events so that the client can identify the related Item. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.input_audio_transcription.failed + description: | + The event type, must be + `conversation.item.input_audio_transcription.failed`. + item_id: + type: string + description: The ID of the user message item. + content_index: + type: integer + description: The index of the content part containing the audio. + error: + type: object + description: Details of the transcription error. + properties: + type: + type: string + description: The type of error. + code: + type: string + description: Error code, if any. + message: + type: string + description: A human-readable error message. + param: + type: string + description: Parameter related to the error, if any. 
+ required: + - event_id + - type + - item_id + - content_index + - error + x-oaiMeta: + name: conversation.item.input_audio_transcription.failed + group: realtime + example: | + { + "event_id": "event_2324", + "type": "conversation.item.input_audio_transcription.failed", + "item_id": "msg_003", + "content_index": 0, + "error": { + "type": "transcription_error", + "code": "audio_unintelligible", + "message": "The audio could not be transcribed.", + "param": null + } + } + RealtimeServerEventConversationItemTruncated: + type: object + description: > + Returned when an earlier assistant audio message item is truncated by + the + + client with a `conversation.item.truncate` event. This event is used to + + synchronize the server's understanding of the audio with the client's + playback. + + + This action will truncate the audio and remove the server-side text + transcript + + to ensure there is no text in the context that hasn't been heard by the + user. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.truncated + description: The event type, must be `conversation.item.truncated`. + item_id: + type: string + description: The ID of the assistant message item that was truncated. + content_index: + type: integer + description: The index of the content part that was truncated. + audio_end_ms: + type: integer + description: | + The duration up to which the audio was truncated, in milliseconds. + required: + - event_id + - type + - item_id + - content_index + - audio_end_ms + x-oaiMeta: + name: conversation.item.truncated + group: realtime + example: | + { + "event_id": "event_2526", + "type": "conversation.item.truncated", + "item_id": "msg_004", + "content_index": 0, + "audio_end_ms": 1500 + } + RealtimeServerEventError: + type: object + description: > + Returned when an error occurs, which could be a client problem or a + server + + problem. Most errors are recoverable and the session will stay open, we + + recommend to implementors to monitor and log error messages by default. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - error + description: The event type, must be `error`. + error: + type: object + description: Details of the error. + properties: + type: + type: string + description: > + The type of error (e.g., "invalid_request_error", + "server_error"). + code: + type: string + description: Error code, if any. + message: + type: string + description: A human-readable error message. + param: + type: string + description: Parameter related to the error, if any. + event_id: + type: string + description: > + The event_id of the client event that caused the error, if + applicable. + required: + - event_id + - type + - error + x-oaiMeta: + name: error + group: realtime + example: | + { + "event_id": "event_890", + "type": "error", + "error": { + "type": "invalid_request_error", + "code": "invalid_event", + "message": "The 'type' field is missing.", + "param": null, + "event_id": "event_567" + } + } + RealtimeServerEventInputAudioBufferCleared: + type: object + description: | + Returned when the input audio buffer is cleared by the client with a + `input_audio_buffer.clear` event. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - input_audio_buffer.cleared + description: The event type, must be `input_audio_buffer.cleared`. 
+ required: + - event_id + - type + x-oaiMeta: + name: input_audio_buffer.cleared + group: realtime + example: | + { + "event_id": "event_1314", + "type": "input_audio_buffer.cleared" + } + RealtimeServerEventInputAudioBufferCommitted: + type: object + description: > + Returned when an input audio buffer is committed, either by the client + or + + automatically in server VAD mode. The `item_id` property is the ID of + the user + + message item that will be created, thus a `conversation.item.created` + event + + will also be sent to the client. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - input_audio_buffer.committed + description: The event type, must be `input_audio_buffer.committed`. + previous_item_id: + type: string + description: > + The ID of the preceding item after which the new item will be + inserted. + item_id: + type: string + description: The ID of the user message item that will be created. + required: + - event_id + - type + - previous_item_id + - item_id + x-oaiMeta: + name: input_audio_buffer.committed + group: realtime + example: | + { + "event_id": "event_1121", + "type": "input_audio_buffer.committed", + "previous_item_id": "msg_001", + "item_id": "msg_002" + } + RealtimeServerEventInputAudioBufferSpeechStarted: + type: object + description: > + Sent by the server when in `server_vad` mode to indicate that speech has + been + + detected in the audio buffer. This can happen any time audio is added to + the + + buffer (unless speech is already detected). The client may want to use + this + + event to interrupt audio playback or provide visual feedback to the + user. + + + The client should expect to receive a + `input_audio_buffer.speech_stopped` event + + when speech stops. The `item_id` property is the ID of the user message + item + + that will be created when speech stops and will also be included in the + + `input_audio_buffer.speech_stopped` event (unless the client manually + commits + + the audio buffer during VAD activation). + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - input_audio_buffer.speech_started + description: The event type, must be `input_audio_buffer.speech_started`. + audio_start_ms: + type: integer + description: > + Milliseconds from the start of all audio written to the buffer + during the + + session when speech was first detected. This will correspond to the + + beginning of audio sent to the model, and thus includes the + + `prefix_padding_ms` configured in the Session. + item_id: + type: string + description: > + The ID of the user message item that will be created when speech + stops. + required: + - event_id + - type + - audio_start_ms + - item_id + x-oaiMeta: + name: input_audio_buffer.speech_started + group: realtime + example: | + { + "event_id": "event_1516", + "type": "input_audio_buffer.speech_started", + "audio_start_ms": 1000, + "item_id": "msg_003" + } + RealtimeServerEventInputAudioBufferSpeechStopped: + type: object + description: > + Returned in `server_vad` mode when the server detects the end of speech + in + + the audio buffer. The server will also send an + `conversation.item.created` + + event with the user message item that is created from the audio buffer. + properties: + event_id: + type: string + description: The unique ID of the server event. 
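+        # Editor's note (illustrative, not part of the generated spec): a common use
+        # of `input_audio_buffer.speech_started` is barge-in handling. A minimal
+        # Python sketch, assuming a hypothetical `player` object that tracks local
+        # playback and a `send_event` callable that writes client events to the
+        # Realtime connection:
+        #
+        #     def on_speech_started(event: dict, player, send_event) -> None:
+        #         """Stop local playback and truncate the interrupted assistant audio."""
+        #         item_id = player.current_item_id      # hypothetical bookkeeping
+        #         played = player.position_ms()
+        #         player.stop()
+        #         if item_id is not None:
+        #             send_event({
+        #                 "type": "conversation.item.truncate",
+        #                 "item_id": item_id,
+        #                 "content_index": 0,
+        #                 "audio_end_ms": played,
+        #             })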
+ type: + type: string + enum: + - input_audio_buffer.speech_stopped + description: The event type, must be `input_audio_buffer.speech_stopped`. + audio_end_ms: + type: integer + description: > + Milliseconds since the session started when speech stopped. This + will + + correspond to the end of audio sent to the model, and thus includes + the + + `min_silence_duration_ms` configured in the Session. + item_id: + type: string + description: The ID of the user message item that will be created. + required: + - event_id + - type + - audio_end_ms + - item_id + x-oaiMeta: + name: input_audio_buffer.speech_stopped + group: realtime + example: | + { + "event_id": "event_1718", + "type": "input_audio_buffer.speech_stopped", + "audio_end_ms": 2000, + "item_id": "msg_003" + } + RealtimeServerEventRateLimitsUpdated: + type: object + description: > + Emitted at the beginning of a Response to indicate the updated rate + limits. + + When a Response is created some tokens will be "reserved" for the + output + + tokens, the rate limits shown here reflect that reservation, which is + then + + adjusted accordingly once the Response is completed. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - rate_limits.updated + description: The event type, must be `rate_limits.updated`. + rate_limits: + type: array + description: List of rate limit information. + items: type: object properties: - object: - enum: - - organization.project.service_account.deleted - type: string - id: - type: string - deleted: - type: boolean - ProjectApiKey: - required: - - object - - redacted_value - - name - - created_at - - id - - owner + name: + type: string + description: The name of the rate limit (`requests`, `tokens`). + limit: + type: integer + description: The maximum allowed value for the rate limit. + remaining: + type: integer + description: The remaining value before the limit is reached. + reset_seconds: + type: number + description: Seconds until the rate limit resets. + required: + - event_id + - type + - rate_limits + x-oaiMeta: + name: rate_limits.updated + group: realtime + example: | + { + "event_id": "event_5758", + "type": "rate_limits.updated", + "rate_limits": [ + { + "name": "requests", + "limit": 1000, + "remaining": 999, + "reset_seconds": 60 + }, + { + "name": "tokens", + "limit": 50000, + "remaining": 49950, + "reset_seconds": 60 + } + ] + } + RealtimeServerEventResponseAudioDelta: + type: object + description: Returned when the model-generated audio is updated. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.audio.delta + description: The event type, must be `response.audio.delta`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: Base64-encoded audio data delta. 
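+        # Editor's note (illustrative, not part of the generated spec): `delta` is a
+        # base64-encoded chunk of raw audio, so clients typically decode each delta
+        # and append it to a per-item playback buffer. A minimal Python sketch:
+        #
+        #     import base64
+        #     from collections import defaultdict
+        #
+        #     audio_buffers: dict[str, bytearray] = defaultdict(bytearray)
+        #
+        #     def on_audio_delta(event: dict) -> None:
+        #         """Decode a response.audio.delta event and buffer the audio bytes."""
+        #         audio_buffers[event["item_id"]].extend(base64.b64decode(event["delta"]))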
+ required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta + x-oaiMeta: + name: response.audio.delta + group: realtime + example: | + { + "event_id": "event_4950", + "type": "response.audio.delta", + "response_id": "resp_001", + "item_id": "msg_008", + "output_index": 0, + "content_index": 0, + "delta": "Base64EncodedAudioDelta" + } + RealtimeServerEventResponseAudioDone: + type: object + description: > + Returned when the model-generated audio is done. Also emitted when a + Response + + is interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.audio.done + description: The event type, must be `response.audio.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + x-oaiMeta: + name: response.audio.done + group: realtime + example: | + { + "event_id": "event_5152", + "type": "response.audio.done", + "response_id": "resp_001", + "item_id": "msg_008", + "output_index": 0, + "content_index": 0 + } + RealtimeServerEventResponseAudioTranscriptDelta: + type: object + description: > + Returned when the model-generated transcription of audio output is + updated. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.audio_transcript.delta + description: The event type, must be `response.audio_transcript.delta`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: The transcript delta. + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta + x-oaiMeta: + name: response.audio_transcript.delta + group: realtime + example: | + { + "event_id": "event_4546", + "type": "response.audio_transcript.delta", + "response_id": "resp_001", + "item_id": "msg_008", + "output_index": 0, + "content_index": 0, + "delta": "Hello, how can I a" + } + RealtimeServerEventResponseAudioTranscriptDone: + type: object + description: | + Returned when the model-generated transcription of audio output is done + streaming. Also emitted when a Response is interrupted, incomplete, or + cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.audio_transcript.done + description: The event type, must be `response.audio_transcript.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + transcript: + type: string + description: The final transcript of the audio. 
+ required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - transcript + x-oaiMeta: + name: response.audio_transcript.done + group: realtime + example: | + { + "event_id": "event_4748", + "type": "response.audio_transcript.done", + "response_id": "resp_001", + "item_id": "msg_008", + "output_index": 0, + "content_index": 0, + "transcript": "Hello, how can I assist you today?" + } + RealtimeServerEventResponseContentPartAdded: + type: object + description: > + Returned when a new content part is added to an assistant message item + during + + response generation. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.content_part.added + description: The event type, must be `response.content_part.added`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item to which the content part was added. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + part: + type: object + description: The content part that was added. + properties: + type: + type: string + enum: + - audio + - text + description: The content type ("text", "audio"). + text: + type: string + description: The text content (if type is "text"). + audio: + type: string + description: Base64-encoded audio data (if type is "audio"). + transcript: + type: string + description: The transcript of the audio (if type is "audio"). + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - part + x-oaiMeta: + name: response.content_part.added + group: realtime + example: | + { + "event_id": "event_3738", + "type": "response.content_part.added", + "response_id": "resp_001", + "item_id": "msg_007", + "output_index": 0, + "content_index": 0, + "part": { + "type": "text", + "text": "" + } + } + RealtimeServerEventResponseContentPartDone: + type: object + description: > + Returned when a content part is done streaming in an assistant message + item. + + Also emitted when a Response is interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.content_part.done + description: The event type, must be `response.content_part.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + part: + type: object + description: The content part that is done. + properties: + type: + type: string + description: The content type ("text", "audio"). + text: + type: string + description: The text content (if type is "text"). + audio: + type: string + description: Base64-encoded audio data (if type is "audio"). + transcript: + type: string + description: The transcript of the audio (if type is "audio"). 
+ required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - part + x-oaiMeta: + name: response.content_part.done + group: realtime + example: | + { + "event_id": "event_3940", + "type": "response.content_part.done", + "response_id": "resp_001", + "item_id": "msg_007", + "output_index": 0, + "content_index": 0, + "part": { + "type": "text", + "text": "Sure, I can help with that." + } + } + RealtimeServerEventResponseCreated: + type: object + description: > + Returned when a new Response is created. The first event of response + creation, + + where the response is in an initial state of `in_progress`. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.created + description: The event type, must be `response.created`. + response: + $ref: "#/components/schemas/RealtimeResponse" + required: + - event_id + - type + - response + x-oaiMeta: + name: response.created + group: realtime + example: | + { + "event_id": "event_2930", + "type": "response.created", + "response": { + "id": "resp_001", + "object": "realtime.response", + "status": "in_progress", + "status_details": null, + "output": [], + "usage": null + } + } + RealtimeServerEventResponseDone: + type: object + description: > + Returned when a Response is done streaming. Always emitted, no matter + the + + final state. The Response object included in the `response.done` event + will + + include all output Items in the Response but will omit the raw audio + data. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.done + description: The event type, must be `response.done`. + response: + $ref: "#/components/schemas/RealtimeResponse" + required: + - event_id + - type + - response + x-oaiMeta: + name: response.done + group: realtime + example: | + { + "event_id": "event_3132", + "type": "response.done", + "response": { + "id": "resp_001", + "object": "realtime.response", + "status": "completed", + "status_details": null, + "output": [ + { + "id": "msg_006", + "object": "realtime.item", + "type": "message", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "text", + "text": "Sure, how can I assist you today?" + } + ] + } + ], + "usage": { + "total_tokens":275, + "input_tokens":127, + "output_tokens":148, + "input_token_details": { + "cached_tokens":384, + "text_tokens":119, + "audio_tokens":8, + "cached_tokens_details": { + "text_tokens": 128, + "audio_tokens": 256 + } + }, + "output_token_details": { + "text_tokens":36, + "audio_tokens":112 + } + } + } + } + RealtimeServerEventResponseFunctionCallArgumentsDelta: + type: object + description: | + Returned when the model-generated function call arguments are updated. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.function_call_arguments.delta + description: | + The event type, must be `response.function_call_arguments.delta`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the function call item. + output_index: + type: integer + description: The index of the output item in the response. + call_id: + type: string + description: The ID of the function call. + delta: + type: string + description: The arguments delta as a JSON string. 
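+        # Editor's note (illustrative, not part of the generated spec): argument
+        # deltas are fragments of one JSON string, so clients usually concatenate
+        # them per `call_id` and parse once the corresponding `.done` event arrives.
+        # A minimal Python sketch:
+        #
+        #     import json
+        #     from collections import defaultdict
+        #
+        #     pending_args: dict[str, str] = defaultdict(str)
+        #
+        #     def on_arguments_delta(event: dict) -> None:
+        #         pending_args[event["call_id"]] += event["delta"]
+        #
+        #     def on_arguments_done(event: dict) -> dict:
+        #         """Prefer the final `arguments` from the done event when present."""
+        #         raw = event.get("arguments") or pending_args.pop(event["call_id"], "")
+        #         return json.loads(raw)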
+ required: + - event_id + - type + - response_id + - item_id + - output_index + - call_id + - delta + x-oaiMeta: + name: response.function_call_arguments.delta + group: realtime + example: | + { + "event_id": "event_5354", + "type": "response.function_call_arguments.delta", + "response_id": "resp_002", + "item_id": "fc_001", + "output_index": 0, + "call_id": "call_001", + "delta": "{\"location\": \"San\"" + } + RealtimeServerEventResponseFunctionCallArgumentsDone: + type: object + description: > + Returned when the model-generated function call arguments are done + streaming. + + Also emitted when a Response is interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.function_call_arguments.done + description: | + The event type, must be `response.function_call_arguments.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the function call item. + output_index: + type: integer + description: The index of the output item in the response. + call_id: + type: string + description: The ID of the function call. + arguments: + type: string + description: The final arguments as a JSON string. + required: + - event_id + - type + - response_id + - item_id + - output_index + - call_id + - arguments + x-oaiMeta: + name: response.function_call_arguments.done + group: realtime + example: | + { + "event_id": "event_5556", + "type": "response.function_call_arguments.done", + "response_id": "resp_002", + "item_id": "fc_001", + "output_index": 0, + "call_id": "call_001", + "arguments": "{\"location\": \"San Francisco\"}" + } + RealtimeServerEventResponseOutputItemAdded: + type: object + description: Returned when a new Item is created during Response generation. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.output_item.added + description: The event type, must be `response.output_item.added`. + response_id: + type: string + description: The ID of the Response to which the item belongs. + output_index: + type: integer + description: The index of the output item in the Response. + item: + $ref: "#/components/schemas/RealtimeConversationItem" + required: + - event_id + - type + - response_id + - output_index + - item + x-oaiMeta: + name: response.output_item.added + group: realtime + example: | + { + "event_id": "event_3334", + "type": "response.output_item.added", + "response_id": "resp_001", + "output_index": 0, + "item": { + "id": "msg_007", + "object": "realtime.item", + "type": "message", + "status": "in_progress", + "role": "assistant", + "content": [] + } + } + RealtimeServerEventResponseOutputItemDone: + type: object + description: > + Returned when an Item is done streaming. Also emitted when a Response + is + + interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.output_item.done + description: The event type, must be `response.output_item.done`. + response_id: + type: string + description: The ID of the Response to which the item belongs. + output_index: + type: integer + description: The index of the output item in the Response. 
+ item: + $ref: "#/components/schemas/RealtimeConversationItem" + required: + - event_id + - type + - response_id + - output_index + - item + x-oaiMeta: + name: response.output_item.done + group: realtime + example: | + { + "event_id": "event_3536", + "type": "response.output_item.done", + "response_id": "resp_001", + "output_index": 0, + "item": { + "id": "msg_007", + "object": "realtime.item", + "type": "message", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "text", + "text": "Sure, I can help with that." + } + ] + } + } + RealtimeServerEventResponseTextDelta: + type: object + description: Returned when the text value of a "text" content part is updated. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.text.delta + description: The event type, must be `response.text.delta`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: The text delta. + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta + x-oaiMeta: + name: response.text.delta + group: realtime + example: | + { + "event_id": "event_4142", + "type": "response.text.delta", + "response_id": "resp_001", + "item_id": "msg_007", + "output_index": 0, + "content_index": 0, + "delta": "Sure, I can h" + } + RealtimeServerEventResponseTextDone: + type: object + description: > + Returned when the text value of a "text" content part is done streaming. + Also + + emitted when a Response is interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.text.done + description: The event type, must be `response.text.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + text: + type: string + description: The final text content. + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - text + x-oaiMeta: + name: response.text.done + group: realtime + example: | + { + "event_id": "event_4344", + "type": "response.text.done", + "response_id": "resp_001", + "item_id": "msg_007", + "output_index": 0, + "content_index": 0, + "text": "Sure, I can help with that." + } + RealtimeServerEventSessionCreated: + type: object + description: > + Returned when a Session is created. Emitted automatically when a new + + connection is established as the first server event. This event will + contain + + the default Session configuration. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - session.created + description: The event type, must be `session.created`. 
+ session: + $ref: "#/components/schemas/RealtimeSession" + required: + - event_id + - type + - session + x-oaiMeta: + name: session.created + group: realtime + example: | + { + "event_id": "event_1234", + "type": "session.created", + "session": { + "id": "sess_001", + "object": "realtime.session", + "model": "gpt-4o-realtime-preview-2024-10-01", + "modalities": ["text", "audio"], + "instructions": "...model instructions here...", + "voice": "sage", + "input_audio_format": "pcm16", + "output_audio_format": "pcm16", + "input_audio_transcription": null, + "turn_detection": { + "type": "server_vad", + "threshold": 0.5, + "prefix_padding_ms": 300, + "silence_duration_ms": 200 + }, + "tools": [], + "tool_choice": "auto", + "temperature": 0.8, + "max_response_output_tokens": "inf" + } + } + RealtimeServerEventSessionUpdated: + type: object + description: > + Returned when a session is updated with a `session.update` event, + unless + + there is an error. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - session.updated + description: The event type, must be `session.updated`. + session: + $ref: "#/components/schemas/RealtimeSession" + required: + - event_id + - type + - session + x-oaiMeta: + name: session.updated + group: realtime + example: | + { + "event_id": "event_5678", + "type": "session.updated", + "session": { + "id": "sess_001", + "object": "realtime.session", + "model": "gpt-4o-realtime-preview-2024-10-01", + "modalities": ["text"], + "instructions": "New instructions", + "voice": "sage", + "input_audio_format": "pcm16", + "output_audio_format": "pcm16", + "input_audio_transcription": { + "model": "whisper-1" + }, + "turn_detection": null, + "tools": [], + "tool_choice": "none", + "temperature": 0.7, + "max_response_output_tokens": 200 + } + } + RealtimeSession: + type: object + description: Realtime session object configuration. + properties: + modalities: + description: | + The set of modalities the model can respond with. To disable audio, + set this to ["text"]. + items: + type: string + enum: + - text + - audio + instructions: + type: string + description: > + The default system instructions (i.e. system message) prepended to + model + + calls. This field allows the client to guide the model on desired + + responses. The model can be instructed on response content and + format, + + (e.g. "be extremely succinct", "act friendly", "here are examples of + good + + responses") and on audio behavior (e.g. "talk quickly", "inject + emotion + + into your voice", "laugh frequently"). The instructions are not + guaranteed + + to be followed by the model, but they provide guidance to the model + on the + + desired behavior. + + + Note that the server sets default instructions which will be used if + this + + field is not set and are visible in the `session.created` event at + the + + start of the session. + voice: + type: string + enum: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + description: > + The voice the model uses to respond. Current voice options are + `ash`, + + `ballad`, `coral`, `sage`, and `verse`. + + + Also supported but not recommended are `alloy`, `echo`, and + `shimmer`. + + These older voices are less expressive. + + + Voice cannot be changed during the session once the model has + + responded with audio at least once. + input_audio_format: + type: string + description: > + The format of input audio. Options are `pcm16`, `g711_ulaw`, or + `g711_alaw`. 
+ output_audio_format: + type: string + description: > + The format of output audio. Options are `pcm16`, `g711_ulaw`, or + `g711_alaw`. + input_audio_transcription: + type: object + description: > + Configuration for input audio transcription, defaults to off and can + be + + set to `null` to turn off once on. Input audio transcription is not + native + + to the model, since the model consumes audio directly. Transcription + runs + + asynchronously through Whisper and should be treated as rough + guidance + + rather than the representation understood by the model. + properties: + model: + type: string + description: > + The model to use for transcription, `whisper-1` is the only + currently + + supported model. + turn_detection: + type: object + description: > + Configuration for turn detection. Can be set to `null` to turn off. + Server + + VAD means that the model will detect the start and end of speech + based on + + audio volume and respond at the end of user speech. + properties: + type: + type: string + description: > + Type of turn detection, only `server_vad` is currently supported. + threshold: + type: number + description: > + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + A + + higher threshold will require louder audio to activate the + model, and + + thus might perform better in noisy environments. + prefix_padding_ms: + type: integer + description: | + Amount of audio to include before the VAD detected speech (in + milliseconds). Defaults to 300ms. + silence_duration_ms: + type: integer + description: > + Duration of silence to detect speech stop (in milliseconds). + Defaults + + to 500ms. With shorter values the model will respond more + quickly, + + but may jump in on short pauses from the user. + tools: + type: array + description: Tools (functions) available to the model. + items: type: object properties: - object: - enum: - - organization.project.api_key - type: string - description: 'The object type, which is always `organization.project.api_key`' - redacted_value: - type: string - description: The redacted value of the API key - name: - type: string - description: The name of the API key - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the API key was created - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - owner: - type: object - properties: - type: - enum: - - user - - service_account - type: string - description: '`user` or `service_account`' - user: - $ref: '#/components/schemas/ProjectUser' - service_account: - $ref: '#/components/schemas/ProjectServiceAccount' - description: Represents an individual API key in a project. - x-oaiMeta: - name: The project API key object - example: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" - ProjectApiKeyListResponse: - required: - - object - - data - - first_id - - last_id - - has_more + type: + type: string + enum: + - function + description: The type of the tool, i.e. `function`. + name: + type: string + description: The name of the function. 
+ description: + type: string + description: > + The description of the function, including guidance on when + and how + + to call it, and guidance about what to tell the user when + calling + + (if anything). + parameters: + type: object + description: Parameters of the function in JSON Schema. + tool_choice: + type: string + description: > + How the model chooses tools. Options are `auto`, `none`, `required`, + or + + specify a function. + temperature: + type: number + description: > + Sampling temperature for the model, limited to [0.6, 1.2]. Defaults + to 0.8. + max_response_output_tokens: + oneOf: + - type: integer + - type: string + enum: + - inf + description: | + Maximum number of output tokens for a single assistant response, + inclusive of tool calls. Provide an integer between 1 and 4096 to + limit output tokens, or `inf` for the maximum available tokens for a + given model. Defaults to `inf`. + ResponseFormatJsonObject: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_object`" + enum: + - json_object + required: + - type + ResponseFormatJsonSchema: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_schema`" + enum: + - json_schema + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model + to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain + underscores and dashes, with a maximum length of 64. + schema: + $ref: "#/components/schemas/ResponseFormatJsonSchemaSchema" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the + output. If set to true, the model will always follow the exact + schema defined in the `schema` field. Only a subset of JSON + Schema is supported when `strict` is `true`. To learn more, read + the [Structured Outputs guide](/docs/guides/structured-outputs). + required: + - type + - name + required: + - type + - json_schema + ResponseFormatJsonSchemaSchema: + type: object + description: The schema for the response format, described as a JSON Schema object. + additionalProperties: true + ResponseFormatText: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `text`" + enum: + - text + required: + - type + RunCompletionUsage: + type: object + description: Usage statistics related to the run. This value will be `null` if + the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + RunObject: + type: object + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run`. 
+ type: string + enum: + - thread.run + created_at: + description: The Unix timestamp (in seconds) for when the run was created. + type: integer + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was + executed on as a part of this run. + type: string + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for + execution of this run. + type: string + status: + description: The status of the run, which can be either `queued`, `in_progress`, + `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + `incomplete`, or `expired`. + type: string + enum: + - queued + - in_progress + - requires_action + - cancelling + - cancelled + - failed + - completed + - incomplete + - expired + required_action: + type: object + description: Details on the action required to continue the run. Will be `null` + if no action is required. + nullable: true + properties: + type: + description: For now, this is always `submit_tool_outputs`. + type: string + enum: + - submit_tool_outputs + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs + last_error: + type: object + description: The last error associated with this run. Will be `null` if there + are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: + - server_error + - rate_limit_exceeded + - invalid_prompt + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + nullable: true + started_at: + description: The Unix timestamp (in seconds) for when the run was started. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run was completed. + type: integer + nullable: true + incomplete_details: + description: Details on why the run is incomplete. Will be `null` if the run is + not incomplete. + type: object + nullable: true + properties: + reason: + description: The reason why the run is incomplete. This will point to which + specific token limit was reached over the course of the run. + type: string + enum: + - max_completion_tokens + - max_prompt_tokens + model: + description: The model that the [assistant](/docs/api-reference/assistants) used + for this run. + type: string + instructions: + description: The instructions that the + [assistant](/docs/api-reference/assistants) used for this run. + type: string + tools: + description: The list of tools that the + [assistant](/docs/api-reference/assistants) used for this run. 
+ default: [] + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to + 1. + type: number + nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults + to 1. + type: number + nullable: true + max_prompt_tokens: + type: integer + nullable: true + description: > + The maximum number of prompt tokens specified to have been used over + the course of the run. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: > + The maximum number of completion tokens specified to have been used + over the course of the run. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - metadata + - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - parallel_tool_calls + - response_format + x-oaiMeta: + name: The run object + beta: true + example: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + "metadata": {}, + "incomplete_details": null, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + RunStepCompletionUsage: + type: object + description: Usage statistics related to the run step. This value will be `null` + while the run step's status is `in_progress`. + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). 
+ required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + RunStepDeltaObject: + type: object + title: Run step delta object + description: > + Represents a run step delta i.e. any changed fields on a run step during + streaming. + properties: + id: + description: The identifier of the run step, which can be referenced in API + endpoints. + type: string + object: + description: The object type, which is always `thread.run.step.delta`. + type: string + enum: + - thread.run.step.delta + delta: + description: The delta containing the fields that have changed on the run step. + type: object + properties: + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The run step delta object + beta: true + example: | + { + "id": "step_123", + "object": "thread.run.step.delta", + "delta": { + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: + - message_creation + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - type + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be + `code_interpreter` for this type of tool call. + enum: + - code_interpreter + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter + can output one or more items, including text (`logs`) or images + (`image`). Each of these are represented by a different object + type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjec\ + t" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObje\ + ct" + x-oaiExpandable: true + required: + - index + - type + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `image`. + type: string + enum: + - image + image: + type: object + properties: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. 
+ type: string + required: + - index + - type + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `logs`. + type: string + enum: + - logs + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - index + - type + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for + this type of tool call. + enum: + - file_search + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - index + - type + - file_search + RunStepDeltaStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for + this type of tool call. + enum: + - function + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have + not been [submitted](/docs/api-reference/runs/submitToolOutputs) + yet. + nullable: true + required: + - index + - type + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: + - tool_calls + tool_calls: + type: array + description: > + An array of tool calls the run step was involved in. These can be + associated with one of three types of tools: `code_interpreter`, + `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + RunStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: + - message_creation + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + required: + - type + - message_creation + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. 
This is always going to be + `code_interpreter` for this type of tool call. + enum: + - code_interpreter + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + required: + - input + - outputs + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter + can output one or more items, including text (`logs`) or images + (`image`). Each of these are represented by a different object + type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - id + - type + - code_interpreter + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output + type: object + properties: + type: + description: Always `image`. + type: string + enum: + - image + image: + type: object + properties: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. + type: string + required: + - file_id + required: + - type + - image + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + type: + description: Always `logs`. + type: string + enum: + - logs + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - type + - logs + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for + this type of tool call. + enum: + - file_search + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + properties: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObje\ + ct" + results: + type: array + description: The results of the file search. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" + required: + - id + - type + - file_search + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options + type: object + description: The ranking options for the file search. + properties: + ranker: + type: string + description: The ranker used for the file search. + enum: + - default_2024_08_21 + score_threshold: + type: number + description: The score threshold for the file search. All values must be a + floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - ranker + - score_threshold + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result + type: object + description: A result instance of the file search. + x-oaiTypeLabel: map + properties: + file_id: + type: string + description: The ID of the file that result was found in. + file_name: + type: string + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number + between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: The content of the result that was found. 
The content is only + included if requested via the include query parameter. + items: type: object properties: - object: - enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/ProjectApiKey' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectApiKeyDeleteResponse: - required: - - object - - id - - deleted + type: + type: string + description: The type of the content. + enum: + - text + text: + type: string + description: The text content of the file. + required: + - file_id + - file_name + - score + RunStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for + this type of tool call. + enum: + - function + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have + not been [submitted](/docs/api-reference/runs/submitToolOutputs) + yet. + nullable: true + required: + - name + - arguments + - output + required: + - id + - type + - function + RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: + - tool_calls + tool_calls: + type: array + description: > + An array of tool calls the run step was involved in. These can be + associated with one of three types of tools: `code_interpreter`, + `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls + RunStepObject: + type: object + title: Run steps + description: | + Represents a step in execution of a run. + properties: + id: + description: The identifier of the run step, which can be referenced in API + endpoints. + type: string + object: + description: The object type, which is always `thread.run.step`. + type: string + enum: + - thread.run.step + created_at: + description: The Unix timestamp (in seconds) for when the run step was created. + type: integer + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) + associated with the run step. + type: string + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. + type: string + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is + a part of. + type: string + type: + description: The type of run step, which can be either `message_creation` or + `tool_calls`. + type: string + enum: + - message_creation + - tool_calls + status: + description: The status of the run step, which can be either `in_progress`, + `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: + - in_progress + - cancelled + - failed + - completed + - expired + step_details: + type: object + description: The details of the run step. 
+ oneOf: + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" + x-oaiExpandable: true + last_error: + type: object + description: The last error associated with this run step. Will be `null` if + there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: + - server_error + - rate_limit_exceeded + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A + step is considered expired if the parent run is expired. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" + required: + - id + - object + - created_at + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage + x-oaiMeta: + name: The run step object + beta: true + example: | + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + RunStepStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: + - thread.run.step.created + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.in_progress + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.delta + data: + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run + step](/docs/api-reference/run-steps/step-object) are being streamed. 
+ x-oaiMeta: + dataDescription: "`data` is a [run step + delta](/docs/api-reference/assistants-streaming/run-step-delta-ob\ + ject)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.completed + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.failed + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + fails. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.cancelled + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.expired + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + expires. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + RunStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: + - thread.run.created + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a new [run](/docs/api-reference/runs/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.queued + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a + `queued` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.in_progress + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to an + `in_progress` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.requires_action + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a + `requires_action` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.completed + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is completed. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.incomplete + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with + status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.failed + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.cancelling + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a + `cancelling` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.cancelled + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.expired + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + RunToolCallObject: + type: object + description: Tool call objects + properties: + id: + type: string + description: The ID of the tool call. This ID must be referenced when you submit + the tool outputs in using the [Submit tool outputs to + run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: + type: string + description: The type of tool call the output is required for. For now, this is + always `function`. + enum: + - function + function: + type: object + description: The function definition. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + required: + - id + - type + - function + StaticChunkingStrategy: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is + `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: > + The number of tokens that overlap between chunks. The default value + is `400`. + + + Note that the overlap must not exceed half of + `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + StaticChunkingStrategyRequestParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. 
+ enum: + - static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + StaticChunkingStrategyResponseParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: + - static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + SubmitToolOutputsRunRequest: + type: object + additionalProperties: false + properties: + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: type: object properties: - object: - enum: - - organization.project.api_key.deleted - type: string - id: - type: string - deleted: - type: boolean - securitySchemes: - ApiKeyAuth: - type: http - scheme: bearer + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the + run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + stream: + type: boolean + nullable: true + description: > + If `true`, returns a stream of events that happen during the Run as + server-sent events, terminating when the Run enters a terminal state + with a `data: [DONE]` message. + required: + - tool_outputs + ThreadObject: + type: object + title: Thread + description: Represents a thread that contains + [messages](/docs/api-reference/messages). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread`. + type: string + enum: + - thread + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. + type: integer + tool_resources: + type: object + description: > + A set of resources that are made available to the assistant's tools + in this thread. The resources are specific to the type of tool. For + example, the `code_interpreter` tool requires a list of file IDs, + while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector + store attached to the thread. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - created_at + - tool_resources + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } + ThreadStreamEvent: + oneOf: + - type: object + properties: + enabled: + type: boolean + description: Whether to enable input audio transcription. 
+ event: + type: string + enum: + - thread.created + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is + created. + x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + TranscriptionSegment: + type: object + properties: + id: + type: integer + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: + type: string + description: Text content of the segment. + tokens: + type: array + items: + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, + consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, + consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher + than 1.0 and the `avg_logprob` is below -1, consider this segment + silent. + required: + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob + TranscriptionWord: + type: object + properties: + word: + type: string + description: The text content of the word. + start: + type: number + format: float + description: Start time of the word in seconds. + end: + type: number + format: float + description: End time of the word in seconds. + required: + - word + - start + - end + TruncationObject: + type: object + title: Thread Truncation Controls + description: Controls for how a thread will be truncated prior to the run. Use + this to control the intial context window of the run. + properties: + type: + type: string + description: The truncation strategy to use for the thread. The default is + `auto`. If set to `last_messages`, the thread will be truncated to + the n most recent messages in the thread. When set to `auto`, + messages in the middle of the thread will be dropped to fit the + context length of the model, `max_prompt_tokens`. + enum: + - auto + - last_messages + last_messages: + type: integer + description: The number of most recent messages from the thread when + constructing the context for the run. + minimum: 1 + nullable: true + required: + - type + UpdateVectorStoreRequest: + type: object + additionalProperties: false + properties: + name: + description: The name of the vector store. + type: string + nullable: true + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + Upload: + type: object + title: Upload + description: | + The Upload object can accept byte chunks in the form of Parts. 
+      properties:
+        id:
+          type: string
+          description: The Upload unique identifier, which can be referenced in API
+            endpoints.
+        created_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the Upload was created.
+        filename:
+          type: string
+          description: The name of the file to be uploaded.
+        bytes:
+          type: integer
+          description: The intended number of bytes to be uploaded.
+        purpose:
+          type: string
+          description: The intended purpose of the file. [Please refer
+            here](/docs/api-reference/files/object#files/object-purpose) for
+            acceptable values.
+        status:
+          type: string
+          description: The status of the Upload.
+          enum:
+            - pending
+            - completed
+            - cancelled
+            - expired
+        expires_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the Upload will expire.
+        object:
+          type: string
+          description: The object type, which is always "upload".
+          enum:
+            - upload
+        file:
+          $ref: "#/components/schemas/OpenAIFile"
+          nullable: true
+          description: The ready File object after the Upload is completed.
+      required:
+        - bytes
+        - created_at
+        - expires_at
+        - filename
+        - id
+        - purpose
+        - status
+      x-oaiMeta:
+        name: The upload object
+        example: |
+          {
+            "id": "upload_abc123",
+            "object": "upload",
+            "bytes": 2147483648,
+            "created_at": 1719184911,
+            "filename": "training_examples.jsonl",
+            "purpose": "fine-tune",
+            "status": "completed",
+            "expires_at": 1719127296,
+            "file": {
+              "id": "file-xyz321",
+              "object": "file",
+              "bytes": 2147483648,
+              "created_at": 1719186911,
+              "filename": "training_examples.jsonl",
+              "purpose": "fine-tune"
+            }
+          }
+    UploadPart:
+      type: object
+      title: UploadPart
+      description: >
+        The upload Part represents a chunk of bytes we can add to an Upload
+        object.
+      properties:
+        id:
+          type: string
+          description: The upload Part unique identifier, which can be referenced in API
+            endpoints.
+        created_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the Part was created.
+        upload_id:
+          type: string
+          description: The ID of the Upload object that this Part was added to.
+        object:
+          type: string
+          description: The object type, which is always `upload.part`.
+          enum:
+            - upload.part
+      required:
+        - created_at
+        - id
+        - object
+        - upload_id
+      x-oaiMeta:
+        name: The upload part object
+        example: |
+          {
+            "id": "part_def456",
+            "object": "upload.part",
+            "created_at": 1719186911,
+            "upload_id": "upload_abc123"
+          }
+    UsageAudioSpeechesResult:
+      type: object
+      description: The aggregated audio speeches usage details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.audio_speeches.result
+        characters:
+          type: integer
+          description: The number of characters processed.
+        num_model_requests:
+          type: integer
+          description: The count of requests made to the model.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+        user_id:
+          type: string
+          description: When `group_by=user_id`, this field provides the user ID of the
+            grouped usage result.
+        api_key_id:
+          type: string
+          description: When `group_by=api_key_id`, this field provides the API key ID of
+            the grouped usage result.
+        model:
+          type: string
+          description: When `group_by=model`, this field provides the model name of the
+            grouped usage result.
+      required:
+        - object
+        - characters
+        - num_model_requests
+      x-oaiMeta:
+        name: Audio speeches usage object
+        example: |
+          {
+              "object": "organization.usage.audio_speeches.result",
+              "characters": 45,
+              "num_model_requests": 1,
+              "project_id": "proj_abc",
+              "user_id": "user-abc",
+              "api_key_id": "key_abc",
+              "model": "tts-1"
+          }
+    UsageAudioTranscriptionsResult:
+      type: object
+      description: The aggregated audio transcriptions usage details of the specific
+        time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.audio_transcriptions.result
+        seconds:
+          type: integer
+          description: The number of seconds processed.
+        num_model_requests:
+          type: integer
+          description: The count of requests made to the model.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+        user_id:
+          type: string
+          description: When `group_by=user_id`, this field provides the user ID of the
+            grouped usage result.
+        api_key_id:
+          type: string
+          description: When `group_by=api_key_id`, this field provides the API key ID of
+            the grouped usage result.
+        model:
+          type: string
+          description: When `group_by=model`, this field provides the model name of the
+            grouped usage result.
+      required:
+        - object
+        - seconds
+        - num_model_requests
+      x-oaiMeta:
+        name: Audio transcriptions usage object
+        example: |
+          {
+              "object": "organization.usage.audio_transcriptions.result",
+              "seconds": 10,
+              "num_model_requests": 1,
+              "project_id": "proj_abc",
+              "user_id": "user-abc",
+              "api_key_id": "key_abc",
+              "model": "tts-1"
+          }
+    UsageCodeInterpreterSessionsResult:
+      type: object
+      description: The aggregated code interpreter sessions usage details of the
+        specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.code_interpreter_sessions.result
+        sessions:
+          type: integer
+          description: The number of code interpreter sessions.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+      required:
+        - object
+        - sessions
+      x-oaiMeta:
+        name: Code interpreter sessions usage object
+        example: |
+          {
+              "object": "organization.usage.code_interpreter_sessions.result",
+              "sessions": 1,
+              "project_id": "proj_abc"
+          }
+    UsageCompletionsResult:
+      type: object
+      description: The aggregated completions usage details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.completions.result
+        input_tokens:
+          type: integer
+          description: The number of input tokens used.
+        input_cached_tokens:
+          type: integer
+          description: The number of input tokens that have been cached from previous
+            requests.
+        output_tokens:
+          type: integer
+          description: The number of output tokens used.
+        num_model_requests:
+          type: integer
+          description: The count of requests made to the model.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+        user_id:
+          type: string
+          description: When `group_by=user_id`, this field provides the user ID of the
+            grouped usage result.
+        api_key_id:
+          type: string
+          description: When `group_by=api_key_id`, this field provides the API key ID of
+            the grouped usage result.
+        model:
+          type: string
+          description: When `group_by=model`, this field provides the model name of the
+            grouped usage result.
+        batch:
+          type: boolean
+          description: When `group_by=batch`, this field indicates whether the grouped
+            usage result is batch or not.
+      required:
+        - object
+        - input_tokens
+        - output_tokens
+        - num_model_requests
+      x-oaiMeta:
+        name: Completions usage object
+        example: |
+          {
+              "object": "organization.usage.completions.result",
+              "input_tokens": 5000,
+              "output_tokens": 1000,
+              "input_cached_tokens": 4000,
+              "num_model_requests": 5,
+              "project_id": "proj_abc",
+              "user_id": "user-abc",
+              "api_key_id": "key_abc",
+              "model": "gpt-4o-mini-2024-07-18",
+              "batch": false
+          }
+    UsageEmbeddingsResult:
+      type: object
+      description: The aggregated embeddings usage details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.embeddings.result
+        input_tokens:
+          type: integer
+          description: The number of input tokens used.
+        num_model_requests:
+          type: integer
+          description: The count of requests made to the model.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+        user_id:
+          type: string
+          description: When `group_by=user_id`, this field provides the user ID of the
+            grouped usage result.
+        api_key_id:
+          type: string
+          description: When `group_by=api_key_id`, this field provides the API key ID of
+            the grouped usage result.
+        model:
+          type: string
+          description: When `group_by=model`, this field provides the model name of the
+            grouped usage result.
+      required:
+        - object
+        - input_tokens
+        - num_model_requests
+      x-oaiMeta:
+        name: Embeddings usage object
+        example: |
+          {
+              "object": "organization.usage.embeddings.result",
+              "input_tokens": 20,
+              "num_model_requests": 2,
+              "project_id": "proj_abc",
+              "user_id": "user-abc",
+              "api_key_id": "key_abc",
+              "model": "text-embedding-ada-002-v2"
+          }
+    UsageImagesResult:
+      type: object
+      description: The aggregated images usage details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.images.result
+        images:
+          type: integer
+          description: The number of images processed.
+        num_model_requests:
+          type: integer
+          description: The count of requests made to the model.
+        source:
+          type: string
+          description: When `group_by=source`, this field provides the source of the
+            grouped usage result; possible values are `image.generation`,
+            `image.edit`, or `image.variation`.
+        size:
+          type: string
+          description: When `group_by=size`, this field provides the image size of the
+            grouped usage result.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+        user_id:
+          type: string
+          description: When `group_by=user_id`, this field provides the user ID of the
+            grouped usage result.
+        api_key_id:
+          type: string
+          description: When `group_by=api_key_id`, this field provides the API key ID of
+            the grouped usage result.
+        model:
+          type: string
+          description: When `group_by=model`, this field provides the model name of the
+            grouped usage result.
+      required:
+        - object
+        - images
+        - num_model_requests
+      x-oaiMeta:
+        name: Images usage object
+        example: |
+          {
+              "object": "organization.usage.images.result",
+              "images": 2,
+              "num_model_requests": 2,
+              "size": "1024x1024",
+              "source": "image.generation",
+              "project_id": "proj_abc",
+              "user_id": "user-abc",
+              "api_key_id": "key_abc",
+              "model": "dall-e-3"
+          }
+    UsageModerationsResult:
+      type: object
+      description: The aggregated moderations usage details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.moderations.result
+        input_tokens:
+          type: integer
+          description: The number of input tokens used.
+        num_model_requests:
+          type: integer
+          description: The count of requests made to the model.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+        user_id:
+          type: string
+          description: When `group_by=user_id`, this field provides the user ID of the
+            grouped usage result.
+        api_key_id:
+          type: string
+          description: When `group_by=api_key_id`, this field provides the API key ID of
+            the grouped usage result.
+        model:
+          type: string
+          description: When `group_by=model`, this field provides the model name of the
+            grouped usage result.
+      required:
+        - object
+        - input_tokens
+        - num_model_requests
+      x-oaiMeta:
+        name: Moderations usage object
+        example: |
+          {
+              "object": "organization.usage.moderations.result",
+              "input_tokens": 20,
+              "num_model_requests": 2,
+              "project_id": "proj_abc",
+              "user_id": "user-abc",
+              "api_key_id": "key_abc",
+              "model": "text-moderation"
+          }
+    UsageResponse:
+      type: object
+      properties:
+        object:
+          type: string
+          enum:
+            - page
+        data:
+          type: array
+          items:
+            $ref: "#/components/schemas/UsageTimeBucket"
+        has_more:
+          type: boolean
+        next_page:
+          type: string
+      required:
+        - object
+        - data
+        - has_more
+        - next_page
+    UsageTimeBucket:
+      type: object
+      properties:
+        object:
+          type: string
+          enum:
+            - bucket
+        start_time:
+          type: integer
+        end_time:
+          type: integer
+        result:
+          type: array
+          items:
+            oneOf:
+              - $ref: "#/components/schemas/UsageCompletionsResult"
+              - $ref: "#/components/schemas/UsageEmbeddingsResult"
+              - $ref: "#/components/schemas/UsageModerationsResult"
+              - $ref: "#/components/schemas/UsageImagesResult"
+              - $ref: "#/components/schemas/UsageAudioSpeechesResult"
+              - $ref: "#/components/schemas/UsageAudioTranscriptionsResult"
+              - $ref: "#/components/schemas/UsageVectorStoresResult"
+              - $ref: "#/components/schemas/UsageCodeInterpreterSessionsResult"
+              - $ref: "#/components/schemas/CostsResult"
+      required:
+        - object
+        - start_time
+        - end_time
+        - result
+    UsageVectorStoresResult:
+      type: object
+      description: The aggregated vector stores usage details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.usage.vector_stores.result
+        usage_bytes:
+          type: integer
+          description: The vector stores usage in bytes.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped usage result.
+      required:
+        - object
+        - usage_bytes
+      x-oaiMeta:
+        name: Vector stores usage object
+        example: |
+          {
+              "object": "organization.usage.vector_stores.result",
+              "usage_bytes": 1024,
+              "project_id": "proj_abc"
+          }
+    User:
+      type: object
+      description: Represents an individual `user` within an organization.
+ properties: + object: + type: string + enum: + - organization.user + description: The object type, which is always `organization.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: + - owner + - reader + description: "`owner` or `reader`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the user was added. + required: + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The user object + example: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + UserDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.user.deleted + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + UserListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/User" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + UserRoleUpdateRequest: + type: object + properties: + role: + type: string + enum: + - owner + - reader + description: "`owner` or `reader`" + required: + - role + VectorStoreExpirationAfter: + type: object + title: Vector store expiration policy + description: The expiration policy for a vector store. + properties: + anchor: + description: "Anchor timestamp after which the expiration policy applies. + Supported anchors: `last_active_at`." + type: string + enum: + - last_active_at + days: + description: The number of days after the anchor time that the vector store will + expire. + type: integer + minimum: 1 + maximum: 365 + required: + - anchor + - days + VectorStoreFileBatchObject: + type: object + title: Vector store file batch + description: A batch of files attached to a vector store. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store.file_batch`. + type: string + enum: + - vector_store.files_batch + created_at: + description: The Unix timestamp (in seconds) for when the vector store files + batch was created. + type: integer + vector_store_id: + description: The ID of the [vector + store](/docs/api-reference/vector-stores/object) that the + [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store files batch, which can be either + `in_progress`, `completed`, `cancelled` or `failed`. + type: string + enum: + - in_progress + - completed + - cancelled + - failed + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that where cancelled. + type: integer + total: + description: The total number of files. 
+              type: integer
+          required:
+            - in_progress
+            - completed
+            - cancelled
+            - failed
+            - total
+      required:
+        - id
+        - object
+        - created_at
+        - vector_store_id
+        - status
+        - file_counts
+      x-oaiMeta:
+        name: The vector store files batch object
+        beta: true
+        example: |
+          {
+            "id": "vsfb_123",
+            "object": "vector_store.files_batch",
+            "created_at": 1698107661,
+            "vector_store_id": "vs_abc123",
+            "status": "completed",
+            "file_counts": {
+              "in_progress": 0,
+              "completed": 100,
+              "failed": 0,
+              "cancelled": 0,
+              "total": 100
+            }
+          }
+    VectorStoreFileObject:
+      type: object
+      title: Vector store files
+      description: A list of files attached to a vector store.
+      properties:
+        id:
+          description: The identifier, which can be referenced in API endpoints.
+          type: string
+        object:
+          description: The object type, which is always `vector_store.file`.
+          type: string
+          enum:
+            - vector_store.file
+        usage_bytes:
+          description: The total vector store usage in bytes. Note that this may be
+            different from the original file size.
+          type: integer
+        created_at:
+          description: The Unix timestamp (in seconds) for when the vector store file was
+            created.
+          type: integer
+        vector_store_id:
+          description: The ID of the [vector
+            store](/docs/api-reference/vector-stores/object) that the
+            [File](/docs/api-reference/files) is attached to.
+          type: string
+        status:
+          description: The status of the vector store file, which can be either
+            `in_progress`, `completed`, `cancelled`, or `failed`. The status
+            `completed` indicates that the vector store file is ready for use.
+          type: string
+          enum:
+            - in_progress
+            - completed
+            - cancelled
+            - failed
+        last_error:
+          type: object
+          description: The last error associated with this vector store file. Will be
+            `null` if there are no errors.
+          nullable: true
+          properties:
+            code:
+              type: string
+              description: One of `server_error`, `unsupported_file`, or `invalid_file`.
+              enum:
+                - server_error
+                - unsupported_file
+                - invalid_file
+            message:
+              type: string
+              description: A human-readable description of the error.
+          required:
+            - code
+            - message
+        chunking_strategy:
+          type: object
+          description: The strategy used to chunk the file.
+          oneOf:
+            - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam"
+            - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam"
+          x-oaiExpandable: true
+      required:
+        - id
+        - object
+        - usage_bytes
+        - created_at
+        - vector_store_id
+        - status
+        - last_error
+      x-oaiMeta:
+        name: The vector store file object
+        beta: true
+        example: |
+          {
+            "id": "file-abc123",
+            "object": "vector_store.file",
+            "usage_bytes": 1234,
+            "created_at": 1698107661,
+            "vector_store_id": "vs_abc123",
+            "status": "completed",
+            "last_error": null,
+            "chunking_strategy": {
+              "type": "static",
+              "static": {
+                "max_chunk_size_tokens": 800,
+                "chunk_overlap_tokens": 400
+              }
+            }
+          }
+    VectorStoreObject:
+      type: object
+      title: Vector store
+      description: A vector store is a collection of processed files that can be used
+        by the `file_search` tool.
+      properties:
+        id:
+          description: The identifier, which can be referenced in API endpoints.
+          type: string
+        object:
+          description: The object type, which is always `vector_store`.
+          type: string
+          enum:
+            - vector_store
+        created_at:
+          description: The Unix timestamp (in seconds) for when the vector store was
+            created.
+          type: integer
+        name:
+          description: The name of the vector store.
+          type: string
+        usage_bytes:
+          description: The total number of bytes used by the files in the vector store.
+ type: integer + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been successfully processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - failed + - cancelled + - total + status: + description: The status of the vector store, which can be either `expired`, + `in_progress`, or `completed`. A status of `completed` indicates + that the vector store is ready for use. + type: string + enum: + - expired + - in_progress + - completed + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + expires_at: + description: The Unix timestamp (in seconds) for when the vector store will + expire. + type: integer + nullable: true + last_active_at: + description: The Unix timestamp (in seconds) for when the vector store was last + active. + type: integer + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata + x-oaiMeta: + name: The vector store object + beta: true + example: | + { + "id": "vs_123", + "object": "vector_store", + "created_at": 1698107661, + "usage_bytes": 123456, + "last_active_at": 1698107661, + "name": "my_vector_store", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "cancelled": 0, + "failed": 0, + "total": 100 + }, + "metadata": {}, + "last_used_at": 1698107661 + } + securitySchemes: + ApiKeyAuth: + type: http + scheme: bearer security: - - ApiKeyAuth: [ ] -tags: - - name: Assistants - description: Build Assistants that can call models and use tools. - - name: Audio - description: Turn audio into text or text into audio. - - name: Chat - description: 'Given a list of messages comprising a conversation, the model will return a response.' - - name: Completions - description: 'Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.' - - name: Embeddings - description: Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. - - name: Fine-tuning - description: Manage fine-tuning jobs to tailor a model to your specific training data. - - name: Batch - description: Create large batches of API requests to run asynchronously. - - name: Files - description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. - - name: Uploads - description: Use Uploads to upload large files in multiple parts. - - name: Images - description: 'Given a prompt and/or an input image, the model will generate a new image.' - - name: Models - description: List and describe the various models available in the API. - - name: Moderations - description: 'Given a input text, outputs if the model classifies it as potentially harmful.' 
- - name: Audit Logs - description: List user actions and configuration changes within this organization. + - ApiKeyAuth: [] x-oaiMeta: - navigationGroups: - - id: endpoints - title: Endpoints - - id: assistants - title: Assistants - - id: administration - title: Administration - - id: legacy - title: Legacy - groups: - - id: audio - title: Audio - description: "Learn how to turn audio into text or text into audio.\n\nRelated guide: [Speech to text](/docs/guides/speech-to-text)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createSpeech - path: createSpeech - - type: endpoint - key: createTranscription - path: createTranscription - - type: endpoint - key: createTranslation - path: createTranslation - - type: object - key: CreateTranscriptionResponseJson - path: json-object - - type: object - key: CreateTranscriptionResponseVerboseJson - path: verbose-json-object - - id: chat - title: Chat - description: "Given a list of messages comprising a conversation, the model will return a response.\n\nRelated guide: [Chat Completions](/docs/guides/text-generation)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createChatCompletion - path: create - - type: object - key: CreateChatCompletionResponse - path: object - - type: object - key: CreateChatCompletionStreamResponse - path: streaming - - id: embeddings - title: Embeddings - description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.\n\nRelated guide: [Embeddings](/docs/guides/embeddings)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createEmbedding - path: create - - type: object - key: Embedding - path: object - - id: fine-tuning - title: Fine-tuning - description: "Manage fine-tuning jobs to tailor a model to your specific training data.\n\nRelated guide: [Fine-tune models](/docs/guides/fine-tuning)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createFineTuningJob - path: create - - type: endpoint - key: listPaginatedFineTuningJobs - path: list - - type: endpoint - key: listFineTuningEvents - path: list-events - - type: endpoint - key: listFineTuningJobCheckpoints - path: list-checkpoints - - type: endpoint - key: retrieveFineTuningJob - path: retrieve - - type: endpoint - key: cancelFineTuningJob - path: cancel - - type: object - key: FinetuneChatRequestInput - path: chat-input - - type: object - key: FinetuneCompletionRequestInput - path: completions-input - - type: object - key: FineTuningJob - path: object - - type: object - key: FineTuningJobEvent - path: event-object - - type: object - key: FineTuningJobCheckpoint - path: checkpoint-object - - id: batch - title: Batch - description: "Create large batches of API requests for asynchronous processing. 
The Batch API returns completions within 24 hours for a 50% discount.\n\nRelated guide: [Batch](/docs/guides/batch)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createBatch - path: create - - type: endpoint - key: retrieveBatch - path: retrieve - - type: endpoint - key: cancelBatch - path: cancel - - type: endpoint - key: listBatches - path: list - - type: object - key: Batch - path: object - - type: object - key: BatchRequestInput - path: request-input - - type: object - key: BatchRequestOutput - path: request-output - - id: files - title: Files - description: "Files are used to upload documents that can be used with features like [Assistants](/docs/api-reference/assistants), [Fine-tuning](/docs/api-reference/fine-tuning), and [Batch API](/docs/guides/batch).\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createFile - path: create - - type: endpoint - key: listFiles - path: list - - type: endpoint - key: retrieveFile - path: retrieve - - type: endpoint - key: deleteFile - path: delete - - type: endpoint - key: downloadFile - path: retrieve-contents - - type: object - key: OpenAIFile - path: object - - id: uploads - title: Uploads - description: "Allows you to upload large files in multiple parts.\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createUpload - path: create - - type: endpoint - key: addUploadPart - path: add-part - - type: endpoint - key: completeUpload - path: complete - - type: endpoint - key: cancelUpload - path: cancel - - type: object - key: Upload - path: object - - type: object - key: UploadPart - path: part-object - - id: images - title: Images - description: "Given a prompt and/or an input image, the model will generate a new image.\n\nRelated guide: [Image generation](/docs/guides/images)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createImage - path: create - - type: endpoint - key: createImageEdit - path: createEdit - - type: endpoint - key: createImageVariation - path: createVariation - - type: object - key: Image - path: object - - id: models - title: Models - description: "List and describe the various models available in the API. 
You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them.\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: listModels - path: list - - type: endpoint - key: retrieveModel - path: retrieve - - type: endpoint - key: deleteModel - path: delete - - type: object - key: Model - path: object - - id: moderations - title: Moderations - description: "Given some input text, outputs if the model classifies it as potentially harmful across several categories.\n\nRelated guide: [Moderations](/docs/guides/moderation)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createModeration - path: create - - type: object - key: CreateModerationResponse - path: object - - id: assistants - title: Assistants - beta: true - description: "Build assistants that can call models and use tools to perform tasks.\n\n[Get started with the Assistants API](/docs/assistants)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createAssistant - path: createAssistant - - type: endpoint - key: listAssistants - path: listAssistants - - type: endpoint - key: getAssistant - path: getAssistant - - type: endpoint - key: modifyAssistant - path: modifyAssistant - - type: endpoint - key: deleteAssistant - path: deleteAssistant - - type: object - key: AssistantObject - path: object - - id: threads - title: Threads - beta: true - description: "Create threads that assistants can interact with.\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createThread - path: createThread - - type: endpoint - key: getThread - path: getThread - - type: endpoint - key: modifyThread - path: modifyThread - - type: endpoint - key: deleteThread - path: deleteThread - - type: object - key: ThreadObject - path: object - - id: messages - title: Messages - beta: true - description: "Create messages within threads\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createMessage - path: createMessage - - type: endpoint - key: listMessages - path: listMessages - - type: endpoint - key: getMessage - path: getMessage - - type: endpoint - key: modifyMessage - path: modifyMessage - - type: endpoint - key: deleteMessage - path: deleteMessage - - type: object - key: MessageObject - path: object - - id: runs - title: Runs - beta: true - description: "Represents an execution run on a thread.\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createRun - path: createRun - - type: endpoint - key: createThreadAndRun - path: createThreadAndRun - - type: endpoint - key: listRuns - path: listRuns - - type: endpoint - key: getRun - path: getRun - - type: endpoint - key: modifyRun - path: modifyRun - - type: endpoint - key: submitToolOuputsToRun - path: submitToolOutputs - - type: endpoint - key: cancelRun - path: cancelRun - - type: object - key: RunObject - path: object - - id: run-steps - title: Run Steps - beta: true - description: "Represents the steps (model and tool calls) taken during the run.\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: listRunSteps - path: listRunSteps - - type: endpoint - key: getRunStep - path: getRunStep - - type: object - key: RunStepObject - path: step-object - - id: vector-stores - title: Vector Stores - 
beta: true - description: "Vector stores are used to store files for use by the `file_search` tool.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createVectorStore - path: create - - type: endpoint - key: listVectorStores - path: list - - type: endpoint - key: getVectorStore - path: retrieve - - type: endpoint - key: modifyVectorStore - path: modify - - type: endpoint - key: deleteVectorStore - path: delete - - type: object - key: VectorStoreObject - path: object - - id: vector-stores-files - title: Vector Store Files - beta: true - description: "Vector store files represent files inside a vector store.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createVectorStoreFile - path: createFile - - type: endpoint - key: listVectorStoreFiles - path: listFiles - - type: endpoint - key: getVectorStoreFile - path: getFile - - type: endpoint - key: deleteVectorStoreFile - path: deleteFile - - type: object - key: VectorStoreFileObject - path: file-object - - id: vector-stores-file-batches - title: Vector Store File Batches - beta: true - description: "Vector store file batches represent operations to add multiple files to a vector store.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createVectorStoreFileBatch - path: createBatch - - type: endpoint - key: getVectorStoreFileBatch - path: getBatch - - type: endpoint - key: cancelVectorStoreFileBatch - path: cancelBatch - - type: endpoint - key: listFilesInVectorStoreBatch - path: listBatchFiles - - type: object - key: VectorStoreFileBatchObject - path: batch-object - - id: assistants-streaming - title: Streaming - beta: true - description: "Stream the result of executing a Run or resuming a Run after submitting tool outputs.\n\nYou can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun),\n[Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs)\nendpoints by passing `\"stream\": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream.\n\nOur Node and Python SDKs provide helpful utilities to make streaming easy. Reference the\n[Assistants API quickstart](/docs/assistants/overview) to learn more.\n" - navigationGroup: assistants - sections: - - type: object - key: MessageDeltaObject - path: message-delta-object - - type: object - key: RunStepDeltaObject - path: run-step-delta-object - - type: object - key: AssistantStreamEvent - path: events - - id: administration - title: Overview - description: "Programmatically manage your organization. \n\nThe Audit Logs endpoint provides a log of all actions taken in the \norganization for security and monitoring purposes.\n\nTo access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints.\n\nFor best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization)\n" - navigationGroup: administration - - id: invite - title: Invites - description: Invite and manage invitations for an organization. 
Invited users are automatically added to the Default project. - navigationGroup: administration - sections: - - type: endpoint - key: list-invites - path: list - - type: endpoint - key: inviteUser - path: create - - type: endpoint - key: retrieve-invite - path: retrieve - - type: endpoint - key: delete-invite - path: delete - - type: object - key: Invite - path: object - - id: users - title: Users - description: "Manage users and their role in an organization. Users will be automatically added to the Default project.\n" - navigationGroup: administration - sections: - - type: endpoint - key: list-users - path: list - - type: endpoint - key: modify-user - path: modify - - type: endpoint - key: retrieve-user - path: retrieve - - type: endpoint - key: delete-user - path: delete - - type: object - key: User - path: object - - id: projects - title: Projects - description: "Manage the projects within an orgnanization includes creation, updating, and archiving or projects. \nThe Default project cannot be modified or archived. \n" - navigationGroup: administration - sections: - - type: endpoint - key: list-projects - path: list - - type: endpoint - key: create-project - path: create - - type: endpoint - key: retrieve-project - path: retrieve - - type: endpoint - key: modify-project - path: modify - - type: endpoint - key: archive-project - path: archive - - type: object - key: Project - path: object - - id: project-users - title: Project Users - description: "Manage users within a project, including adding, updating roles, and removing users. \nUsers cannot be removed from the Default project, unless they are being removed from the organization. \n" - navigationGroup: administration - sections: - - type: endpoint - key: list-project-users - path: list - - type: endpoint - key: create-project-user - path: creeate - - type: endpoint - key: retrieve-project-user - path: retrieve - - type: endpoint - key: modify-project-user - path: modify - - type: endpoint - key: delete-project-user - path: delete - - type: object - key: ProjectUser - path: object - - id: project-service-accounts - title: Project Service Accounts - description: "Manage service accounts within a project. A service account is a bot user that is not associated with a user. \nIf a user leaves an organization, their keys and membership in projects will no longer work. Service accounts \ndo not have this limitation. However, service accounts can also be deleted from a project.\n" - navigationGroup: administration - sections: - - type: endpoint - key: list-project-service-accounts - path: list - - type: endpoint - key: create-project-service-account - path: create - - type: endpoint - key: retrieve-project-service-account - path: retrieve - - type: endpoint - key: delete-project-service-account - path: delete - - type: object - key: ProjectServiceAccount - path: object - - id: project-api-keys - title: Project API Keys - description: "Manage API keys for a given project. Supports listing and deleting keys for users. \nThis API does not allow issuing keys for users, as users need to authorize themselves to generate keys. \n" - navigationGroup: administration - sections: - - type: endpoint - key: list-project-api-keys - path: list - - type: endpoint - key: retrieve-project-api-key - path: retrieve - - type: endpoint - key: delete-project-api-key - path: delete - - type: object - key: ProjectApiKey - path: object - - id: audit-logs - title: Audit Logs - description: "Logs of user actions and configuration changes within this organization. 
\n\nTo log events, you must activate logging in the [Organization Settings](/settings/organization/general). \nOnce activated, for security reasons, logging cannot be deactivated.\n" - navigationGroup: administration - sections: - - type: endpoint - key: list-audit-logs - path: list - - type: object - key: AuditLog - path: object - - id: completions - title: Completions - legacy: true - navigationGroup: legacy - description: "Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developer should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models.\n" - sections: - - type: endpoint - key: createCompletion - path: create - - type: object - key: CreateCompletionResponse - path: object \ No newline at end of file + navigationGroups: + - id: endpoints + title: Endpoints + - id: assistants + title: Assistants + beta: true + - id: administration + title: Administration + - id: realtime + title: Realtime + beta: true + - id: legacy + title: Legacy + groups: + - id: audio + title: Audio + description: | + Learn how to turn audio into text or text into audio. + + Related guide: [Speech to text](/docs/guides/speech-to-text) + navigationGroup: endpoints + sections: + - type: endpoint + key: createSpeech + path: createSpeech + - type: endpoint + key: createTranscription + path: createTranscription + - type: endpoint + key: createTranslation + path: createTranslation + - type: object + key: CreateTranscriptionResponseJson + path: json-object + - type: object + key: CreateTranscriptionResponseVerboseJson + path: verbose-json-object + - id: chat + title: Chat + description: > + Given a list of messages comprising a conversation, the model will + return a response. + + Related guide: [Chat Completions](/docs/guides/text-generation) + navigationGroup: endpoints + sections: + - type: endpoint + key: createChatCompletion + path: create + - type: object + key: CreateChatCompletionResponse + path: object + - type: object + key: CreateChatCompletionStreamResponse + path: streaming + - id: embeddings + title: Embeddings + description: > + Get a vector representation of a given input that can be easily consumed + by machine learning models and algorithms. + + Related guide: [Embeddings](/docs/guides/embeddings) + navigationGroup: endpoints + sections: + - type: endpoint + key: createEmbedding + path: create + - type: object + key: Embedding + path: object + - id: fine-tuning + title: Fine-tuning + description: > + Manage fine-tuning jobs to tailor a model to your specific training + data. 
+ + Related guide: [Fine-tune models](/docs/guides/fine-tuning) + navigationGroup: endpoints + sections: + - type: endpoint + key: createFineTuningJob + path: create + - type: endpoint + key: listPaginatedFineTuningJobs + path: list + - type: endpoint + key: listFineTuningEvents + path: list-events + - type: endpoint + key: listFineTuningJobCheckpoints + path: list-checkpoints + - type: endpoint + key: retrieveFineTuningJob + path: retrieve + - type: endpoint + key: cancelFineTuningJob + path: cancel + - type: object + key: FinetuneChatRequestInput + path: chat-input + - type: object + key: FinetuneCompletionRequestInput + path: completions-input + - type: object + key: FineTuningJob + path: object + - type: object + key: FineTuningJobEvent + path: event-object + - type: object + key: FineTuningJobCheckpoint + path: checkpoint-object + - id: batch + title: Batch + description: > + Create large batches of API requests for asynchronous processing. The + Batch API returns completions within 24 hours for a 50% discount. + + Related guide: [Batch](/docs/guides/batch) + navigationGroup: endpoints + sections: + - type: endpoint + key: createBatch + path: create + - type: endpoint + key: retrieveBatch + path: retrieve + - type: endpoint + key: cancelBatch + path: cancel + - type: endpoint + key: listBatches + path: list + - type: object + key: Batch + path: object + - type: object + key: BatchRequestInput + path: request-input + - type: object + key: BatchRequestOutput + path: request-output + - id: files + title: Files + description: > + Files are used to upload documents that can be used with features like + [Assistants](/docs/api-reference/assistants), + [Fine-tuning](/docs/api-reference/fine-tuning), and [Batch + API](/docs/guides/batch). + navigationGroup: endpoints + sections: + - type: endpoint + key: createFile + path: create + - type: endpoint + key: listFiles + path: list + - type: endpoint + key: retrieveFile + path: retrieve + - type: endpoint + key: deleteFile + path: delete + - type: endpoint + key: downloadFile + path: retrieve-contents + - type: object + key: OpenAIFile + path: object + - id: uploads + title: Uploads + description: | + Allows you to upload large files in multiple parts. + navigationGroup: endpoints + sections: + - type: endpoint + key: createUpload + path: create + - type: endpoint + key: addUploadPart + path: add-part + - type: endpoint + key: completeUpload + path: complete + - type: endpoint + key: cancelUpload + path: cancel + - type: object + key: Upload + path: object + - type: object + key: UploadPart + path: part-object + - id: images + title: Images + description: > + Given a prompt and/or an input image, the model will generate a new + image. + + Related guide: [Image generation](/docs/guides/images) + navigationGroup: endpoints + sections: + - type: endpoint + key: createImage + path: create + - type: endpoint + key: createImageEdit + path: createEdit + - type: endpoint + key: createImageVariation + path: createVariation + - type: object + key: Image + path: object + - id: models + title: Models + description: > + List and describe the various models available in the API. You can refer + to the [Models](/docs/models) documentation to understand what models + are available and the differences between them. 
+ navigationGroup: endpoints + sections: + - type: endpoint + key: listModels + path: list + - type: endpoint + key: retrieveModel + path: retrieve + - type: endpoint + key: deleteModel + path: delete + - type: object + key: Model + path: object + - id: moderations + title: Moderations + description: > + Given text and/or image inputs, classifies if those inputs are + potentially harmful across several categories. + + Related guide: [Moderations](/docs/guides/moderation) + navigationGroup: endpoints + sections: + - type: endpoint + key: createModeration + path: create + - type: object + key: CreateModerationResponse + path: object + - id: assistants + title: Assistants + beta: true + description: | + Build assistants that can call models and use tools to perform tasks. + + [Get started with the Assistants API](/docs/assistants) + navigationGroup: assistants + sections: + - type: endpoint + key: createAssistant + path: createAssistant + - type: endpoint + key: listAssistants + path: listAssistants + - type: endpoint + key: getAssistant + path: getAssistant + - type: endpoint + key: modifyAssistant + path: modifyAssistant + - type: endpoint + key: deleteAssistant + path: deleteAssistant + - type: object + key: AssistantObject + path: object + - id: threads + title: Threads + beta: true + description: | + Create threads that assistants can interact with. + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createThread + path: createThread + - type: endpoint + key: getThread + path: getThread + - type: endpoint + key: modifyThread + path: modifyThread + - type: endpoint + key: deleteThread + path: deleteThread + - type: object + key: ThreadObject + path: object + - id: messages + title: Messages + beta: true + description: | + Create messages within threads + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createMessage + path: createMessage + - type: endpoint + key: listMessages + path: listMessages + - type: endpoint + key: getMessage + path: getMessage + - type: endpoint + key: modifyMessage + path: modifyMessage + - type: endpoint + key: deleteMessage + path: deleteMessage + - type: object + key: MessageObject + path: object + - id: runs + title: Runs + beta: true + description: | + Represents an execution run on a thread. + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createRun + path: createRun + - type: endpoint + key: createThreadAndRun + path: createThreadAndRun + - type: endpoint + key: listRuns + path: listRuns + - type: endpoint + key: getRun + path: getRun + - type: endpoint + key: modifyRun + path: modifyRun + - type: endpoint + key: submitToolOuputsToRun + path: submitToolOutputs + - type: endpoint + key: cancelRun + path: cancelRun + - type: object + key: RunObject + path: object + - id: run-steps + title: Run steps + beta: true + description: | + Represents the steps (model and tool calls) taken during the run. + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: listRunSteps + path: listRunSteps + - type: endpoint + key: getRunStep + path: getRunStep + - type: object + key: RunStepObject + path: step-object + - id: vector-stores + title: Vector stores + beta: true + description: | + Vector stores are used to store files for use by the `file_search` tool. 
+ + Related guide: [File Search](/docs/assistants/tools/file-search) + navigationGroup: assistants + sections: + - type: endpoint + key: createVectorStore + path: create + - type: endpoint + key: listVectorStores + path: list + - type: endpoint + key: getVectorStore + path: retrieve + - type: endpoint + key: modifyVectorStore + path: modify + - type: endpoint + key: deleteVectorStore + path: delete + - type: object + key: VectorStoreObject + path: object + - id: vector-stores-files + title: Vector store files + beta: true + description: | + Vector store files represent files inside a vector store. + + Related guide: [File Search](/docs/assistants/tools/file-search) + navigationGroup: assistants + sections: + - type: endpoint + key: createVectorStoreFile + path: createFile + - type: endpoint + key: listVectorStoreFiles + path: listFiles + - type: endpoint + key: getVectorStoreFile + path: getFile + - type: endpoint + key: deleteVectorStoreFile + path: deleteFile + - type: object + key: VectorStoreFileObject + path: file-object + - id: vector-stores-file-batches + title: Vector store file batches + beta: true + description: > + Vector store file batches represent operations to add multiple files to + a vector store. + + Related guide: [File Search](/docs/assistants/tools/file-search) + navigationGroup: assistants + sections: + - type: endpoint + key: createVectorStoreFileBatch + path: createBatch + - type: endpoint + key: getVectorStoreFileBatch + path: getBatch + - type: endpoint + key: cancelVectorStoreFileBatch + path: cancelBatch + - type: endpoint + key: listFilesInVectorStoreBatch + path: listBatchFiles + - type: object + key: VectorStoreFileBatchObject + path: batch-object + - id: assistants-streaming + title: Streaming + beta: true + description: > + Stream the result of executing a Run or resuming a Run after submitting + tool outputs. + + You can stream events from the [Create Thread and + Run](/docs/api-reference/runs/createThreadAndRun), + + [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool + Outputs](/docs/api-reference/runs/submitToolOutputs) + + endpoints by passing `"stream": true`. The response will be a + [Server-Sent + events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) + stream. + + Our Node and Python SDKs provide helpful utilities to make streaming + easy. Reference the + + [Assistants API quickstart](/docs/assistants/overview) to learn more. + navigationGroup: assistants + sections: + - type: object + key: MessageDeltaObject + path: message-delta-object + - type: object + key: RunStepDeltaObject + path: run-step-delta-object + - type: object + key: AssistantStreamEvent + path: events + - id: administration + title: Administration + description: > + Programmatically manage your organization. + + The Audit Logs endpoint provides a log of all actions taken in + the organization for security and monitoring purposes. + + To access these endpoints please generate an Admin API Key through the + [API Platform Organization overview](/organization/admin-keys). Admin + API keys cannot be used for non-administration endpoints. + + For best practices on setting up your organization, please refer to this + [guide](/docs/guides/production-best-practices#setting-up-your-organization) + navigationGroup: administration + - id: invite + title: Invites + description: Invite and manage invitations for an organization. Invited users + are automatically added to the Default project. 
+ navigationGroup: administration + sections: + - type: endpoint + key: list-invites + path: list + - type: endpoint + key: inviteUser + path: create + - type: endpoint + key: retrieve-invite + path: retrieve + - type: endpoint + key: delete-invite + path: delete + - type: object + key: Invite + path: object + - id: users + title: Users + description: > + Manage users and their role in an organization. Users will be + automatically added to the Default project. + navigationGroup: administration + sections: + - type: endpoint + key: list-users + path: list + - type: endpoint + key: modify-user + path: modify + - type: endpoint + key: retrieve-user + path: retrieve + - type: endpoint + key: delete-user + path: delete + - type: object + key: User + path: object + - id: projects + title: Projects + description: > + Manage the projects within an organization, including creation, updating, + and archiving of projects. + + The Default project cannot be modified or archived. + navigationGroup: administration + sections: + - type: endpoint + key: list-projects + path: list + - type: endpoint + key: create-project + path: create + - type: endpoint + key: retrieve-project + path: retrieve + - type: endpoint + key: modify-project + path: modify + - type: endpoint + key: archive-project + path: archive + - type: object + key: Project + path: object + - id: project-users + title: Project users + description: > + Manage users within a project, including adding, updating roles, and + removing users. + + Users cannot be removed from the Default project, unless they are being + removed from the organization. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-users + path: list + - type: endpoint + key: create-project-user + path: creeate + - type: endpoint + key: retrieve-project-user + path: retrieve + - type: endpoint + key: modify-project-user + path: modify + - type: endpoint + key: delete-project-user + path: delete + - type: object + key: ProjectUser + path: object + - id: project-service-accounts + title: Project service accounts + description: > + Manage service accounts within a project. A service account is a bot + user that is not associated with a user. + + If a user leaves an organization, their keys and membership in projects + will no longer work. Service accounts + + do not have this limitation. However, service accounts can also be + deleted from a project. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-service-accounts + path: list + - type: endpoint + key: create-project-service-account + path: create + - type: endpoint + key: retrieve-project-service-account + path: retrieve + - type: endpoint + key: delete-project-service-account + path: delete + - type: object + key: ProjectServiceAccount + path: object + - id: project-api-keys + title: Project API keys + description: > + Manage API keys for a given project. Supports listing and deleting keys + for users. + + This API does not allow issuing keys for users, as users need to + authorize themselves to generate keys. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-api-keys + path: list + - type: endpoint + key: retrieve-project-api-key + path: retrieve + - type: endpoint + key: delete-project-api-key + path: delete + - type: object + key: ProjectApiKey + path: object + - id: project-rate-limits + title: Project rate limits + description: > + Manage rate limits per model for projects.
Rate limits may be configured + to be equal to or lower than the organization's rate limits. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-rate-limits + path: list + - type: endpoint + key: update-project-rate-limits + path: update + - type: object + key: ProjectRateLimit + path: object + - id: audit-logs + title: Audit logs + description: > + Logs of user actions and configuration changes within this + organization. + + To log events, you must activate logging in the [Organization + Settings](/settings/organization/general). + + Once activated, for security reasons, logging cannot be deactivated. + navigationGroup: administration + sections: + - type: endpoint + key: list-audit-logs + path: list + - type: object + key: AuditLog + path: object + - id: usage + title: Usage + description: > + The **Usage API** provides detailed insights into your activity across + the OpenAI API. It also includes a separate [Costs + endpoint](/docs/api-reference/usage/costs), which offers visibility into + your spend, breaking down consumption by invoice line items and project + IDs. + + + While the Usage API delivers granular usage data, it may not always + reconcile perfectly with the Costs due to minor differences in how usage + and spend are recorded. For financial purposes, we recommend using the + [Costs endpoint](/docs/api-reference/usage/costs) or the [Costs + tab](/settings/organization/usage) in the Usage Dashboard, which will + reconcile back to your billing invoice. + navigationGroup: administration + sections: + - type: endpoint + key: usage-completions + path: completions + - type: object + key: UsageCompletionsResult + path: completions_object + - type: endpoint + key: usage-embeddings + path: embeddings + - type: object + key: UsageEmbeddingsResult + path: embeddings_object + - type: endpoint + key: usage-moderations + path: moderations + - type: object + key: UsageModerationsResult + path: moderations_object + - type: endpoint + key: usage-images + path: images + - type: object + key: UsageImagesResult + path: images_object + - type: endpoint + key: usage-audio-speeches + path: audio_speeches + - type: object + key: UsageAudioSpeechesResult + path: audio_speeches_object + - type: endpoint + key: usage-audio-transcriptions + path: audio_transcriptions + - type: object + key: UsageAudioTranscriptionsResult + path: audio_transcriptions_object + - type: endpoint + key: usage-vector-stores + path: vector_stores + - type: object + key: UsageVectorStoresResult + path: vector_stores_object + - type: endpoint + key: usage-code-interpreter-sessions + path: code_interpreter_sessions + - type: object + key: UsageCodeInterpreterSessionsResult + path: code_interpreter_sessions_object + - type: endpoint + key: usage-costs + path: costs + - type: object + key: CostsResult + path: costs_object + - id: realtime + title: Realtime + beta: true + description: > + Communicate with a GPT-4o class model live, in real time, over + WebSocket. + + Produces both audio and text transcriptions. + + [Learn more about the Realtime API](/docs/guides/realtime). + navigationGroup: realtime + - id: realtime-client-events + title: Client events + description: > + These are events that the OpenAI Realtime WebSocket server will accept + from the client. 
+ navigationGroup: realtime + sections: + - type: object + key: RealtimeClientEventSessionUpdate + path: + - type: object + key: RealtimeClientEventInputAudioBufferAppend + path: + - type: object + key: RealtimeClientEventInputAudioBufferCommit + path: + - type: object + key: RealtimeClientEventInputAudioBufferClear + path: + - type: object + key: RealtimeClientEventConversationItemCreate + path: + - type: object + key: RealtimeClientEventConversationItemTruncate + path: + - type: object + key: RealtimeClientEventConversationItemDelete + path: + - type: object + key: RealtimeClientEventResponseCreate + path: + - type: object + key: RealtimeClientEventResponseCancel + path: + - id: realtime-server-events + title: Server events + description: > + These are events emitted from the OpenAI Realtime WebSocket server to + the client. + navigationGroup: realtime + sections: + - type: object + key: RealtimeServerEventError + path: + - type: object + key: RealtimeServerEventSessionCreated + path: + - type: object + key: RealtimeServerEventSessionUpdated + path: + - type: object + key: RealtimeServerEventConversationCreated + path: + - type: object + key: RealtimeServerEventConversationItemCreated + path: + - type: object + key: RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + path: + - type: object + key: RealtimeServerEventConversationItemInputAudioTranscriptionFailed + path: + - type: object + key: RealtimeServerEventConversationItemTruncated + path: + - type: object + key: RealtimeServerEventConversationItemDeleted + path: + - type: object + key: RealtimeServerEventInputAudioBufferCommitted + path: + - type: object + key: RealtimeServerEventInputAudioBufferCleared + path: + - type: object + key: RealtimeServerEventInputAudioBufferSpeechStarted + path: + - type: object + key: RealtimeServerEventInputAudioBufferSpeechStopped + path: + - type: object + key: RealtimeServerEventResponseCreated + path: + - type: object + key: RealtimeServerEventResponseDone + path: + - type: object + key: RealtimeServerEventResponseOutputItemAdded + path: + - type: object + key: RealtimeServerEventResponseOutputItemDone + path: + - type: object + key: RealtimeServerEventResponseContentPartAdded + path: + - type: object + key: RealtimeServerEventResponseContentPartDone + path: + - type: object + key: RealtimeServerEventResponseTextDelta + path: + - type: object + key: RealtimeServerEventResponseTextDone + path: + - type: object + key: RealtimeServerEventResponseAudioTranscriptDelta + path: + - type: object + key: RealtimeServerEventResponseAudioTranscriptDone + path: + - type: object + key: RealtimeServerEventResponseAudioDelta + path: + - type: object + key: RealtimeServerEventResponseAudioDone + path: + - type: object + key: RealtimeServerEventResponseFunctionCallArgumentsDelta + path: + - type: object + key: RealtimeServerEventResponseFunctionCallArgumentsDone + path: + - type: object + key: RealtimeServerEventRateLimitsUpdated + path: + - id: completions + title: Completions + legacy: true + navigationGroup: legacy + description: > + Given a prompt, the model will return one or more predicted completions + along with the probabilities of alternative tokens at each position. + Most developer should use our [Chat Completions + API](/docs/guides/text-generation#text-generation-models) to leverage + our best and newest models. 
+ sections: + - type: endpoint + key: createCompletion + path: create + - type: object + key: CreateCompletionResponse + path: object diff --git a/src/libs/AutoSDK/Extensions/StringExtensions.cs b/src/libs/AutoSDK/Extensions/StringExtensions.cs index a7326b595c..d2b397e44a 100644 --- a/src/libs/AutoSDK/Extensions/StringExtensions.cs +++ b/src/libs/AutoSDK/Extensions/StringExtensions.cs @@ -62,6 +62,8 @@ public static string ToParameterName(this string input) "" => string.Empty, "Event" => "@event", "event" => "@event", + "Object" => "@object", + "object" => "@object", "Namespace" => "@namespace", "namespace" => "@namespace", #pragma warning disable CA1308 // Normalize strings to uppercase diff --git a/src/libs/AutoSDK/Sources/Sources.Models.AnyOf.cs b/src/libs/AutoSDK/Sources/Sources.Models.AnyOf.cs index c943e86132..a72b8f174c 100644 --- a/src/libs/AutoSDK/Sources/Sources.Models.AnyOf.cs +++ b/src/libs/AutoSDK/Sources/Sources.Models.AnyOf.cs @@ -53,6 +53,11 @@ public static string GenerateAnyOf( {x.Name} = {x.ParameterName}; ").Inject()} }}" : " "; + var objectProperty = + anyOfData.Properties.Any(x => x.ParameterName == "object") || + anyOfData.DiscriminatorPropertyName == "Object" + ? "Object1" + : "Object"; return $@"{(anyOfData.IsNamed ? @"#pragma warning disable CS0618 // Type or member is obsolete " : "")} @@ -102,7 +107,7 @@ namespace {anyOfData.Namespace} {constructorWithAllValues} {string.Empty.ToXmlDocumentationSummary(level: 8)} - public object? Object => + public object? {objectProperty} => {anyOfData.Properties.Reverse().Select(x => $@" {x.Name} as object ?? ").Inject().TrimEnd('?', '\n')} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateAssistant.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateAssistant.g.verified.cs index d5757cdae1..168402d86a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateAssistant.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateAssistant.g.verified.cs @@ -162,7 +162,7 @@ partial void ProcessCreateAssistantResponseContent( /// Create an assistant with a model and instructions. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o /// /// @@ -175,28 +175,30 @@ partial void ProcessCreateAssistantResponseContent( /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateMessage.g.verified.cs index 00e71048f7..2d65031f5b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateMessage.g.verified.cs @@ -178,7 +178,7 @@ partial void ProcessCreateMessageResponseContent( /// A list of files attached to the message, and the tools they should be added to. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateRun.g.verified.cs index 86faad506a..de05c7aed0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateRun.g.verified.cs @@ -9,11 +9,13 @@ public partial class AssistantsClient partial void PrepareCreateRunArguments( global::System.Net.Http.HttpClient httpClient, ref string threadId, + global::System.Collections.Generic.IList? include, global::G.CreateRunRequest request); partial void PrepareCreateRunRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, string threadId, + global::System.Collections.Generic.IList? include, global::G.CreateRunRequest request); partial void ProcessCreateRunResponse( global::System.Net.Http.HttpClient httpClient, @@ -28,12 +30,14 @@ partial void ProcessCreateRunResponseContent( /// Create a run. /// /// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task CreateRunAsync( string threadId, global::G.CreateRunRequest request, + global::System.Collections.Generic.IList? include = default, global::System.Threading.CancellationToken cancellationToken = default) { request = request ?? 
throw new global::System.ArgumentNullException(nameof(request)); @@ -43,6 +47,7 @@ partial void ProcessCreateRunResponseContent( PrepareCreateRunArguments( httpClient: HttpClient, threadId: ref threadId, + include: include, request: request); var __pathBuilder = new PathBuilder( @@ -86,6 +91,7 @@ partial void ProcessCreateRunResponseContent( httpClient: HttpClient, httpRequestMessage: __httpRequest, threadId: threadId, + include: include, request: request); using var __response = await HttpClient.SendAsync( @@ -168,6 +174,7 @@ partial void ProcessCreateRunResponseContent( /// Create a run. /// /// + /// /// /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. /// @@ -188,15 +195,16 @@ partial void ProcessCreateRunResponseContent( /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -220,12 +228,13 @@ partial void ProcessCreateRunResponseContent( /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -233,11 +242,12 @@ partial void ProcessCreateRunResponseContent( public async global::System.Threading.Tasks.Task CreateRunAsync( string threadId, string assistantId, + global::System.Collections.Generic.IList? include = default, global::G.AnyOf? model = default, string? instructions = default, string? additionalInstructions = default, global::System.Collections.Generic.IList? additionalMessages = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, object? metadata = default, double? temperature = default, double? topP = default, @@ -272,6 +282,7 @@ partial void ProcessCreateRunResponseContent( return await CreateRunAsync( threadId: threadId, + include: include, request: __request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateThread.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateThread.g.verified.cs index 07faf634a8..5d2f93aff8 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateThread.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateThread.g.verified.cs @@ -168,7 +168,7 @@ partial void ProcessCreateThreadResponseContent( /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateThreadAndRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateThreadAndRun.g.verified.cs index 01369781ca..44df1f0e7f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateThreadAndRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.CreateThreadAndRun.g.verified.cs @@ -179,15 +179,16 @@ partial void ProcessCreateThreadAndRunResponseContent( /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -211,12 +212,13 @@ partial void ProcessCreateThreadAndRunResponseContent( /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -226,7 +228,7 @@ partial void ProcessCreateThreadAndRunResponseContent( global::G.CreateThreadRequest? thread = default, global::G.AnyOf? model = default, string? instructions = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, global::G.CreateThreadAndRunRequestToolResources? toolResources = default, object? metadata = default, double? temperature = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.GetRunStep.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.GetRunStep.g.verified.cs index 1fd705ae07..fc31495109 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.GetRunStep.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.GetRunStep.g.verified.cs @@ -10,13 +10,15 @@ partial void PrepareGetRunStepArguments( global::System.Net.Http.HttpClient httpClient, ref string threadId, ref string runId, - ref string stepId); + ref string stepId, + global::System.Collections.Generic.IList? include); partial void PrepareGetRunStepRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, string threadId, string runId, - string stepId); + string stepId, + global::System.Collections.Generic.IList? include); partial void ProcessGetRunStepResponse( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpResponseMessage httpResponseMessage); @@ -32,12 +34,14 @@ partial void ProcessGetRunStepResponseContent( /// /// /// + /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task GetRunStepAsync( string threadId, string runId, string stepId, + global::System.Collections.Generic.IList? 
include = default, global::System.Threading.CancellationToken cancellationToken = default) { PrepareArguments( @@ -46,7 +50,8 @@ partial void ProcessGetRunStepResponseContent( httpClient: HttpClient, threadId: ref threadId, runId: ref runId, - stepId: ref stepId); + stepId: ref stepId, + include: include); var __pathBuilder = new PathBuilder( path: $"/threads/{threadId}/runs/{runId}/steps/{stepId}", @@ -84,7 +89,8 @@ partial void ProcessGetRunStepResponseContent( httpRequestMessage: __httpRequest, threadId: threadId, runId: runId, - stepId: stepId); + stepId: stepId, + include: include); using var __response = await HttpClient.SendAsync( request: __httpRequest, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ListRunSteps.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ListRunSteps.g.verified.cs index 570c6dce66..361fb8bdd2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ListRunSteps.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ListRunSteps.g.verified.cs @@ -13,7 +13,8 @@ partial void PrepareListRunStepsArguments( ref int? limit, ref global::G.ListRunStepsOrder? order, ref string? after, - ref string? before); + ref string? before, + global::System.Collections.Generic.IList? include); partial void PrepareListRunStepsRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, @@ -22,7 +23,8 @@ partial void PrepareListRunStepsRequest( int? limit, global::G.ListRunStepsOrder? order, string? after, - string? before); + string? before, + global::System.Collections.Generic.IList? include); partial void ProcessListRunStepsResponse( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpResponseMessage httpResponseMessage); @@ -45,6 +47,7 @@ partial void ProcessListRunStepsResponseContent( /// /// /// + /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ListRunStepsAsync( @@ -54,6 +57,7 @@ partial void ProcessListRunStepsResponseContent( global::G.ListRunStepsOrder? order = default, string? after = default, string? before = default, + global::System.Collections.Generic.IList? include = default, global::System.Threading.CancellationToken cancellationToken = default) { PrepareArguments( @@ -65,7 +69,8 @@ partial void ProcessListRunStepsResponseContent( limit: ref limit, order: ref order, after: ref after, - before: ref before); + before: ref before, + include: include); var orderValue = order switch { @@ -118,7 +123,8 @@ partial void ProcessListRunStepsResponseContent( limit: limit, order: order, after: after, - before: before); + before: before, + include: include); using var __response = await HttpClient.SendAsync( request: __httpRequest, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyAssistant.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyAssistant.g.verified.cs index e9008f683b..60a9e04852 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyAssistant.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyAssistant.g.verified.cs @@ -169,7 +169,7 @@ partial void ProcessModifyAssistantResponseContent( /// /// /// - /// ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The name of the assistant. The maximum length is 256 characters. @@ -181,28 +181,30 @@ partial void ProcessModifyAssistantResponseContent( /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -213,7 +215,7 @@ partial void ProcessModifyAssistantResponseContent( string? name = default, string? description = default, string? instructions = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, global::G.ModifyAssistantRequestToolResources? toolResources = default, object? metadata = default, double? temperature = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyMessage.g.verified.cs index 60649ae865..38713f6103 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyMessage.g.verified.cs @@ -176,7 +176,7 @@ partial void ProcessModifyMessageResponseContent( /// /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyRun.g.verified.cs index 89de69a4e5..ec62491732 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyRun.g.verified.cs @@ -176,7 +176,7 @@ partial void ProcessModifyRunResponseContent( /// /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
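The `response_format` wording above now says "ensures" rather than "guarantees", but the practical caveat is unchanged: with plain JSON mode the prompt itself must still ask for JSON. A sketch of the two payload shapes for an assistant create/modify request, written as C# raw string literals (the model, instructions, and schema are illustrative):

    // JSON mode: the instructions must explicitly request JSON, otherwise the model may emit
    // whitespace until it reaches the token limit, as the note above warns.
    const string jsonModeAssistant = """
    {
      "model": "gpt-4o",
      "instructions": "You are a helpful assistant. Always reply with a single JSON object.",
      "response_format": { "type": "json_object" }
    }
    """;

    // Structured Outputs: the reply is constrained to the supplied JSON schema.
    const string structuredOutputsAssistant = """
    {
      "model": "gpt-4o",
      "response_format": {
        "type": "json_schema",
        "json_schema": {
          "name": "answer",
          "schema": {
            "type": "object",
            "properties": { "answer": { "type": "string" } },
            "required": ["answer"],
            "additionalProperties": false
          }
        }
      }
    }
    """;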
/// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyThread.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyThread.g.verified.cs index d458a4d3df..4bd0172890 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyThread.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AssistantsClient.ModifyThread.g.verified.cs @@ -172,7 +172,7 @@ partial void ProcessModifyThreadResponseContent( /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateSpeech.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateSpeech.g.verified.cs index 75b7d128f5..284992af4b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateSpeech.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateSpeech.g.verified.cs @@ -153,13 +153,13 @@ partial void ProcessCreateSpeechResponseContent( /// Generates audio from the input text. /// /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// /// /// The text to generate audio for. The maximum length is 4096 characters. /// /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// /// /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
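The CreateSpeech hunk above only retargets its documentation links, but it spells out the available TTS models, voices, and output formats. A self-contained sketch of the equivalent REST call (POST /v1/audio/speech), assuming `OPENAI_API_KEY` is set in the environment; the model, voice, and format values come from the doc comment above.

    using System;
    using System.Net.Http;
    using System.Net.Http.Headers;
    using System.Text;
    using System.Threading.Tasks;

    class SpeechExample
    {
        static async Task Main()
        {
            using var http = new HttpClient();
            http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
                "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

            // One of the TTS models, one of the documented voices, and one of the output formats.
            const string body = """
            {
              "model": "tts-1",
              "input": "The quick brown fox jumped over the lazy dog.",
              "voice": "alloy",
              "response_format": "mp3"
            }
            """;

            using var response = await http.PostAsync(
                "https://api.openai.com/v1/audio/speech",
                new StringContent(body, Encoding.UTF8, "application/json"));
            response.EnsureSuccessStatusCode();

            await System.IO.File.WriteAllBytesAsync(
                "speech.mp3", await response.Content.ReadAsByteArrayAsync());
        }
    }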
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateTranscription.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateTranscription.g.verified.cs index e6dbc847a8..9dbe8d21fa 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateTranscription.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateTranscription.g.verified.cs @@ -208,10 +208,10 @@ partial void ProcessCreateTranscriptionResponseContent( /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -230,7 +230,7 @@ partial void ProcessCreateTranscriptionResponseContent( global::G.AnyOf model, string? language = default, string? prompt = default, - global::G.CreateTranscriptionRequestResponseFormat? responseFormat = default, + global::G.AudioResponseFormat? responseFormat = default, double? temperature = default, global::System.Collections.Generic.IList? timestampGranularities = default, global::System.Threading.CancellationToken cancellationToken = default) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateTranslation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateTranslation.g.verified.cs index 4d2f5246f4..dd34f90dcf 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateTranslation.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.AudioClient.CreateTranslation.g.verified.cs @@ -84,7 +84,7 @@ partial void ProcessCreateTranslationResponseContent( if (request.ResponseFormat != default) { __httpRequestContent.Add( - content: new global::System.Net.Http.StringContent($"{request.ResponseFormat}"), + content: new global::System.Net.Http.StringContent($"{request.ResponseFormat?.ToValueString()}"), name: "response_format"); } if (request.Temperature != default) @@ -193,10 +193,10 @@ partial void ProcessCreateTranslationResponseContent( /// Example: whisper-1 /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -210,7 +210,7 @@ partial void ProcessCreateTranslationResponseContent( string filename, global::G.AnyOf model, string? prompt = default, - string? responseFormat = default, + global::G.AudioResponseFormat? responseFormat = default, double? temperature = default, global::System.Threading.CancellationToken cancellationToken = default) { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.BatchClient.CreateBatch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.BatchClient.CreateBatch.g.verified.cs index 2eddaabc64..96b10ae1e7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.BatchClient.CreateBatch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.BatchClient.CreateBatch.g.verified.cs @@ -164,7 +164,7 @@ partial void ProcessCreateBatchResponseContent( /// /// The ID of an uploaded file that contains requests for the new batch.
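In the transcription and translation hunks above, `responseFormat` becomes the shared `G.AudioResponseFormat` type (serialized with `ToValueString()` in the multipart writer) instead of a per-request enum or a raw string. Since the full method shape is not visible in this excerpt, here is a self-contained sketch of the equivalent REST call, using one of the documented output options:

    using System;
    using System.IO;
    using System.Net.Http;
    using System.Net.Http.Headers;
    using System.Threading.Tasks;

    class TranscriptionExample
    {
        static async Task Main()
        {
            using var http = new HttpClient();
            http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
                "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

            var audioBytes = await File.ReadAllBytesAsync("meeting.mp3");
            using var form = new MultipartFormDataContent
            {
                { new ByteArrayContent(audioBytes), "file", "meeting.mp3" },
                { new StringContent("whisper-1"), "model" },
                // One of: json, text, srt, verbose_json, vtt (see the doc comment above).
                { new StringContent("verbose_json"), "response_format" }
            };

            using var response = await http.PostAsync(
                "https://api.openai.com/v1/audio/transcriptions", form);
            response.EnsureSuccessStatusCode();
            Console.WriteLine(await response.Content.ReadAsStringAsync());
        }
    }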
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
- /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. /// /// /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ChatClient.CreateChatCompletion.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ChatClient.CreateChatCompletion.g.verified.cs index e7a25aeba0..fd409d7619 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ChatClient.CreateChatCompletion.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ChatClient.CreateChatCompletion.g.verified.cs @@ -23,7 +23,9 @@ partial void ProcessCreateChatCompletionResponseContent( ref string content); /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// /// The token to cancel the operation with @@ -159,18 +161,32 @@ partial void ProcessCreateChatCompletionResponseContent( } /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
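The rewritten `messages` description above notes that, depending on the model, a single message can mix several content types. As a small illustration of the shape involved, here is one user message combining text with an image URL, written as a C# raw string literal (the URL is a placeholder):

    // A user message whose content is an array of typed parts (text + image_url).
    const string multimodalMessage = """
    {
      "role": "user",
      "content": [
        { "type": "text", "text": "Describe this image in one sentence." },
        { "type": "image_url", "image_url": { "url": "https://example.com/photo.jpg" } }
      ]
    }
    """;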
/// Example: gpt-4o /// + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + /// + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
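`store` and `metadata` are new to this request in the updated spec: `store` opts the completion into retention for the distillation and evals products, and `metadata` carries the tags used to filter stored completions in the dashboard. A minimal request-body sketch as a C# raw string literal (the tag names are illustrative):

    const string storedCompletionBody = """
    {
      "model": "gpt-4o-mini",
      "store": true,
      "metadata": { "app": "checkout-bot", "env": "staging" },
      "messages": [
        { "role": "user", "content": "Summarize today's release notes." }
      ]
    }
    """;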
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -184,24 +200,42 @@ partial void ProcessCreateChatCompletionResponseContent( /// /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. /// - /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
- /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// + /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). /// /// /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
/// Default Value: 1
/// Example: 1 /// + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + /// + /// + /// Configuration for a [Predicted Output](/docs/guides/predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + /// + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
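The new `modalities`, `prediction`, and `audio` parameters are described above; when audio output is requested, the `audio` object must accompany the `text`/`audio` modalities pair. A request-body sketch for the documented `gpt-4o-audio-preview` model, as a C# raw string literal (the voice and format values are assumptions drawn from the audio/TTS documentation rather than from this excerpt):

    const string audioCompletionBody = """
    {
      "model": "gpt-4o-audio-preview",
      "modalities": ["text", "audio"],
      "audio": { "voice": "alloy", "format": "wav" },
      "messages": [
        { "role": "user", "content": "Say hello in one short sentence." }
      ]
    }
    """;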
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// @@ -211,10 +245,12 @@ partial void ProcessCreateChatCompletionResponseContent( /// /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
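The `service_tier` description above now distinguishes projects with and without Scale tier, with `auto` as the default (noted just below). Pinning a request to the default tier is a single extra field; the response then echoes the tier that was actually used:

    // Fragment of a chat completion request body; the response reports `service_tier` back.
    const string serviceTierFragment = """
    { "service_tier": "default" }
    """;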
+ /// Default Value: auto /// /// /// Up to 4 sequences where the API will stop generating further tokens. @@ -250,10 +286,11 @@ partial void ProcessCreateChatCompletionResponseContent( /// `none` is the default when no tools are present. `auto` is the default if tools are present. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with @@ -261,12 +298,17 @@ partial void ProcessCreateChatCompletionResponseContent( public async global::System.Threading.Tasks.Task CreateChatCompletionAsync( global::System.Collections.Generic.IList messages, global::G.AnyOf model, + bool? store = default, + global::System.Collections.Generic.Dictionary? metadata = default, double? frequencyPenalty = default, global::System.Collections.Generic.Dictionary? logitBias = default, bool? logprobs = default, int? topLogprobs = default, - int? maxTokens = default, + int? maxCompletionTokens = default, int? n = default, + global::System.Collections.Generic.IList? modalities = default, + global::G.PredictionContent? prediction = default, + global::G.CreateChatCompletionRequestAudio? audio = default, double? presencePenalty = default, global::G.ResponseFormat? responseFormat = default, int? seed = default, @@ -286,12 +328,17 @@ partial void ProcessCreateChatCompletionResponseContent( { Messages = messages, Model = model, + Store = store, + Metadata = metadata, FrequencyPenalty = frequencyPenalty, LogitBias = logitBias, Logprobs = logprobs, TopLogprobs = topLogprobs, - MaxTokens = maxTokens, + MaxCompletionTokens = maxCompletionTokens, N = n, + Modalities = modalities, + Prediction = prediction, + Audio = audio, PresencePenalty = presencePenalty, ResponseFormat = responseFormat, Seed = seed, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.CompletionsClient.CreateCompletion.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.CompletionsClient.CreateCompletion.g.verified.cs index e89f5308d2..76ad6555ac 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.CompletionsClient.CreateCompletion.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.CompletionsClient.CreateCompletion.g.verified.cs @@ -162,7 +162,7 @@ partial void ProcessCreateCompletionResponseContent( /// Creates a completion for the provided prompt and parameters. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
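Taken together, the signature changes above rename `maxTokens` to `maxCompletionTokens` and add `store`, `metadata`, `modalities`, `prediction`, and `audio`, all of which flow straight into the request object (`Store = store`, `MaxCompletionTokens = maxCompletionTokens`, and so on). A sketch of the updated call using only parameter names visible in this diff; it assumes `chat` is an existing chat client and that `messages` and `model` were built elsewhere, since their generic arguments are elided in this excerpt.

    // Sketch only: `chat`, `messages`, and `model` are assumed to exist already.
    var completion = await chat.CreateChatCompletionAsync(
        messages: messages,
        model: model,
        store: true,                 // new: keep the completion for distillation / evals
        maxCompletionTokens: 256,    // replaces the old maxTokens parameter
        n: 1);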
@@ -181,7 +181,7 @@ partial void ProcessCreateCompletionResponseContent( /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -207,7 +207,7 @@ partial void ProcessCreateCompletionResponseContent( /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -242,7 +242,7 @@ partial void ProcessCreateCompletionResponseContent( /// Example: 1 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.EmbeddingsClient.CreateEmbedding.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.EmbeddingsClient.CreateEmbedding.g.verified.cs index ad41044d12..d1a94e7bfd 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.EmbeddingsClient.CreateEmbedding.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.EmbeddingsClient.CreateEmbedding.g.verified.cs @@ -166,7 +166,7 @@ partial void ProcessCreateEmbeddingResponseContent( /// Example: The quick brown fox jumped over the lazy dog /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small /// /// @@ -178,7 +178,7 @@ partial void ProcessCreateEmbeddingResponseContent( /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FilesClient.CreateFile.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FilesClient.CreateFile.g.verified.cs index 0444eb38bb..4141d9318f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FilesClient.CreateFile.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FilesClient.CreateFile.g.verified.cs @@ -26,7 +26,7 @@ partial void ProcessCreateFileResponseContent( /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.
- /// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
+ /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// @@ -169,7 +169,7 @@ partial void ProcessCreateFileResponseContent( /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.
- /// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
+ /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
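Both the Batch and Files hunks above raise the `.jsonl` input limit from 100 MB to 200 MB while keeping the 50,000-request cap. A self-contained sketch of the two underlying REST calls, uploading the file with purpose `batch` and then creating the batch from its ID; the `completion_window` value comes from the Batch API and is not shown in this excerpt.

    using System;
    using System.IO;
    using System.Net.Http;
    using System.Net.Http.Headers;
    using System.Text;
    using System.Text.Json;
    using System.Threading.Tasks;

    class BatchExample
    {
        static async Task Main()
        {
            using var http = new HttpClient();
            http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
                "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

            // 1) Upload the JSONL request file (purpose "batch", up to 200 MB / 50,000 requests).
            var jsonlBytes = await File.ReadAllBytesAsync("requests.jsonl");
            using var form = new MultipartFormDataContent
            {
                { new StringContent("batch"), "purpose" },
                { new ByteArrayContent(jsonlBytes), "file", "requests.jsonl" }
            };
            using var fileResponse = await http.PostAsync("https://api.openai.com/v1/files", form);
            fileResponse.EnsureSuccessStatusCode();
            using var fileJson = JsonDocument.Parse(await fileResponse.Content.ReadAsStringAsync());
            var fileId = fileJson.RootElement.GetProperty("id").GetString();

            // 2) Create the batch against one of the supported endpoints.
            var batchBody = $$"""
            {
              "input_file_id": "{{fileId}}",
              "endpoint": "/v1/chat/completions",
              "completion_window": "24h"
            }
            """;
            using var batchResponse = await http.PostAsync(
                "https://api.openai.com/v1/batches",
                new StringContent(batchBody, Encoding.UTF8, "application/json"));
            Console.WriteLine(await batchResponse.Content.ReadAsStringAsync());
        }
    }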
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FilesClient.ListFiles.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FilesClient.ListFiles.g.verified.cs index e38491589f..d5fb537032 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FilesClient.ListFiles.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FilesClient.ListFiles.g.verified.cs @@ -8,11 +8,17 @@ public partial class FilesClient { partial void PrepareListFilesArguments( global::System.Net.Http.HttpClient httpClient, - ref string? purpose); + ref string? purpose, + ref int? limit, + ref global::G.ListFilesOrder? order, + ref string? after); partial void PrepareListFilesRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, - string? purpose); + string? purpose, + int? limit, + global::G.ListFilesOrder? order, + string? after); partial void ProcessListFilesResponse( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpResponseMessage httpResponseMessage); @@ -23,26 +29,48 @@ partial void ProcessListFilesResponseContent( ref string content); /// - /// Returns a list of files that belong to the user's organization. + /// Returns a list of files. /// /// + /// + /// Default Value: 10000 + /// + /// + /// Default Value: desc + /// + /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ListFilesAsync( string? purpose = default, + int? limit = default, + global::G.ListFilesOrder? order = default, + string? after = default, global::System.Threading.CancellationToken cancellationToken = default) { PrepareArguments( client: HttpClient); PrepareListFilesArguments( httpClient: HttpClient, - purpose: ref purpose); + purpose: ref purpose, + limit: ref limit, + order: ref order, + after: ref after); + var orderValue = order switch + { + global::G.ListFilesOrder.Asc => "asc", + global::G.ListFilesOrder.Desc => "desc", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; var __pathBuilder = new PathBuilder( path: "/files", baseUri: HttpClient.BaseAddress); __pathBuilder .AddOptionalParameter("purpose", purpose) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("order", orderValue?.ToString()) + .AddOptionalParameter("after", after) ; var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -75,7 +103,10 @@ partial void ProcessListFilesResponseContent( PrepareListFilesRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, - purpose: purpose); + purpose: purpose, + limit: limit, + order: order, + after: after); using var __response = await HttpClient.SendAsync( request: __httpRequest, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FineTuningClient.CreateFineTuningJob.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FineTuningClient.CreateFineTuningJob.g.verified.cs index f032bb50a9..f0aa6c457f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FineTuningClient.CreateFineTuningJob.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.FineTuningClient.CreateFineTuningJob.g.verified.cs @@ -167,7 +167,7 @@ partial void 
ProcessCreateFineTuningJobResponseContent( /// /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini /// /// @@ -182,7 +182,7 @@ partial void ProcessCreateFineTuningJobResponseContent( /// The hyperparameters used for the fine-tuning job. /// /// - /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateAssistant.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateAssistant.g.verified.cs index 9f763f754a..ca0ad570b9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateAssistant.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateAssistant.g.verified.cs @@ -19,7 +19,7 @@ public partial interface IAssistantsClient /// Create an assistant with a model and instructions. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
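The fine-tuning `suffix` limit above grows from 18 to 64 characters. A request-body sketch for creating a job with a custom suffix, as a C# raw string literal (the training file ID is a placeholder); the resulting model name follows the `ft:gpt-4o-mini:openai:custom-model-name:...` pattern shown in the doc comment:

    // Body for POST /v1/fine_tuning/jobs; "suffix" may now be up to 64 characters.
    const string fineTuneBody = """
    {
      "model": "gpt-4o-mini",
      "training_file": "file-abc123",
      "suffix": "custom-model-name"
    }
    """;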
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o /// /// @@ -32,28 +32,30 @@ public partial interface IAssistantsClient /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateMessage.g.verified.cs index 2602ad643c..c852257423 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateMessage.g.verified.cs @@ -31,7 +31,7 @@ public partial interface IAssistantsClient /// A list of files attached to the message, and the tools they should be added to. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateRun.g.verified.cs index f9d1d0a2ba..a1526d2a94 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateRun.g.verified.cs @@ -9,18 +9,21 @@ public partial interface IAssistantsClient /// Create a run. /// /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task CreateRunAsync( string threadId, global::G.CreateRunRequest request, + global::System.Collections.Generic.IList? include = default, global::System.Threading.CancellationToken cancellationToken = default); /// /// Create a run. /// /// + /// /// /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. /// @@ -41,15 +44,16 @@ public partial interface IAssistantsClient /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -73,12 +77,13 @@ public partial interface IAssistantsClient /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -86,11 +91,12 @@ public partial interface IAssistantsClient global::System.Threading.Tasks.Task CreateRunAsync( string threadId, string assistantId, + global::System.Collections.Generic.IList? include = default, global::G.AnyOf? model = default, string? instructions = default, string? additionalInstructions = default, global::System.Collections.Generic.IList? additionalMessages = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, object? metadata = default, double? temperature = default, double? topP = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateThread.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateThread.g.verified.cs index 309b51fe53..97a1855b2f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateThread.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateThread.g.verified.cs @@ -25,7 +25,7 @@ public partial interface IAssistantsClient /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateThreadAndRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateThreadAndRun.g.verified.cs index 79abfc7f97..ca4112a52e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateThreadAndRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.CreateThreadAndRun.g.verified.cs @@ -36,15 +36,16 @@ public partial interface IAssistantsClient /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -68,12 +69,13 @@ public partial interface IAssistantsClient /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -83,7 +85,7 @@ public partial interface IAssistantsClient global::G.CreateThreadRequest? thread = default, global::G.AnyOf? model = default, string? instructions = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, global::G.CreateThreadAndRunRequestToolResources? toolResources = default, object? metadata = default, double? temperature = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.GetRunStep.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.GetRunStep.g.verified.cs index af17024963..3ebd9ea725 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.GetRunStep.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.GetRunStep.g.verified.cs @@ -11,12 +11,14 @@ public partial interface IAssistantsClient /// /// /// + /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task GetRunStepAsync( string threadId, string runId, string stepId, + global::System.Collections.Generic.IList? include = default, global::System.Threading.CancellationToken cancellationToken = default); } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ListRunSteps.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ListRunSteps.g.verified.cs index 26a92ef80f..1eb0e7b347 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ListRunSteps.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ListRunSteps.g.verified.cs @@ -18,6 +18,7 @@ public partial interface IAssistantsClient /// /// /// + /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ListRunStepsAsync( @@ -27,6 +28,7 @@ public partial interface IAssistantsClient global::G.ListRunStepsOrder? order = default, string? after = default, string? before = default, + global::System.Collections.Generic.IList? include = default, global::System.Threading.CancellationToken cancellationToken = default); } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyAssistant.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyAssistant.g.verified.cs index 3e026d82e1..5a290d1b42 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyAssistant.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyAssistant.g.verified.cs @@ -22,7 +22,7 @@ public partial interface IAssistantsClient /// /// /// - /// ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The name of the assistant. The maximum length is 256 characters. @@ -34,28 +34,30 @@ public partial interface IAssistantsClient /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -66,7 +68,7 @@ public partial interface IAssistantsClient string? name = default, string? description = default, string? instructions = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, global::G.ModifyAssistantRequestToolResources? toolResources = default, object? metadata = default, double? temperature = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyMessage.g.verified.cs index 86dc28695d..5162783c1b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyMessage.g.verified.cs @@ -25,7 +25,7 @@ public partial interface IAssistantsClient /// /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyRun.g.verified.cs index 70a137d555..9972e65298 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyRun.g.verified.cs @@ -25,7 +25,7 @@ public partial interface IAssistantsClient /// /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
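As a side note on the `metadata` limits repeated throughout these interfaces (at most 16 pairs, keys up to 64 characters, values up to 512 characters), a small illustrative helper is sketched below. It is not part of the generated client; the type and method names are arbitrary.

// Illustrative only: mirrors the documented metadata limits so a request can be
// checked client-side before it is sent. Not part of the generated G client.
using System.Collections.Generic;
using System.Linq;

static class MetadataLimits
{
    public static bool IsValid(IReadOnlyDictionary<string, string> metadata) =>
        metadata.Count <= 16 &&
        metadata.All(kv => kv.Key.Length <= 64 && kv.Value.Length <= 512);
}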
/// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyThread.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyThread.g.verified.cs index c37342e72e..bf1c71dfcb 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyThread.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAssistantsClient.ModifyThread.g.verified.cs @@ -25,7 +25,7 @@ public partial interface IAssistantsClient /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateSpeech.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateSpeech.g.verified.cs index d9eec0f9a3..1d8f35d201 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateSpeech.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateSpeech.g.verified.cs @@ -19,13 +19,13 @@ public partial interface IAudioClient /// Generates audio from the input text. /// /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// /// /// The text to generate audio for. The maximum length is 4096 characters. /// /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// /// /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
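The `CreateSpeech` descriptions above amount to a small JSON request. The sketch below is a minimal illustration that posts it with plain HttpClient rather than the generated IAudioClient; the /v1/audio/speech path is the documented REST endpoint for this operation, while the model, voice, input text, and output file name are placeholder assumptions.

// A minimal sketch, not the generated IAudioClient: request one of the documented
// voices and output formats and write the returned audio bytes to disk.
using System;
using System.IO;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;

var body = new
{
    model = "tts-1",        // or "tts-1-hd"
    voice = "alloy",        // alloy, echo, fable, onyx, nova, shimmer
    input = "The quick brown fox jumped over the lazy dog.",
    response_format = "mp3" // mp3, opus, aac, flac, wav, pcm
};

using var http = new HttpClient();
http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
    "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

var response = await http.PostAsync(
    "https://api.openai.com/v1/audio/speech",
    new StringContent(JsonSerializer.Serialize(body), Encoding.UTF8, "application/json"));

await File.WriteAllBytesAsync("speech.mp3", await response.Content.ReadAsByteArrayAsync());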
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateTranscription.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateTranscription.g.verified.cs index 2dbc484983..a7ba0231fe 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateTranscription.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateTranscription.g.verified.cs @@ -32,10 +32,10 @@ public partial interface IAudioClient /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -54,7 +54,7 @@ public partial interface IAudioClient global::G.AnyOf model, string? language = default, string? prompt = default, - global::G.CreateTranscriptionRequestResponseFormat? responseFormat = default, + global::G.AudioResponseFormat? responseFormat = default, double? temperature = default, global::System.Collections.Generic.IList? timestampGranularities = default, global::System.Threading.CancellationToken cancellationToken = default); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateTranslation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateTranslation.g.verified.cs index 15a6423711..f564fb5835 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateTranslation.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IAudioClient.CreateTranslation.g.verified.cs @@ -29,10 +29,10 @@ public partial interface IAudioClient /// Example: whisper-1 /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -46,7 +46,7 @@ public partial interface IAudioClient string filename, global::G.AnyOf model, string? prompt = default, - string? responseFormat = default, + global::G.AudioResponseFormat? responseFormat = default, double? temperature = default, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IBatchClient.CreateBatch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IBatchClient.CreateBatch.g.verified.cs index 438904dc82..f91e31bcd4 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IBatchClient.CreateBatch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IBatchClient.CreateBatch.g.verified.cs @@ -21,7 +21,7 @@ public partial interface IBatchClient /// /// The ID of an uploaded file that contains requests for the new batch.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
- /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. /// /// /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IChatClient.CreateChatCompletion.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IChatClient.CreateChatCompletion.g.verified.cs index 40374c5b83..1208ddb1c7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IChatClient.CreateChatCompletion.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IChatClient.CreateChatCompletion.g.verified.cs @@ -6,7 +6,9 @@ namespace G public partial interface IChatClient { /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// /// The token to cancel the operation with @@ -16,18 +18,32 @@ public partial interface IChatClient global::System.Threading.CancellationToken cancellationToken = default); /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + /// + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -41,24 +57,42 @@ public partial interface IChatClient /// /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. /// - /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
- /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// + /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). /// /// /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
/// Default Value: 1
/// Example: 1 /// + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + /// + /// + /// Configuration for a [Predicted Output](/docs/guides/predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + /// + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// @@ -68,10 +102,12 @@ public partial interface IChatClient /// /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto /// /// /// Up to 4 sequences where the API will stop generating further tokens. @@ -107,10 +143,11 @@ public partial interface IChatClient /// `none` is the default when no tools are present. `auto` is the default if tools are present. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with @@ -118,12 +155,17 @@ public partial interface IChatClient global::System.Threading.Tasks.Task CreateChatCompletionAsync( global::System.Collections.Generic.IList messages, global::G.AnyOf model, + bool? store = default, + global::System.Collections.Generic.Dictionary? metadata = default, double? frequencyPenalty = default, global::System.Collections.Generic.Dictionary? logitBias = default, bool? logprobs = default, int? topLogprobs = default, - int? maxTokens = default, + int? maxCompletionTokens = default, int? n = default, + global::System.Collections.Generic.IList? modalities = default, + global::G.PredictionContent? prediction = default, + global::G.CreateChatCompletionRequestAudio? audio = default, double? presencePenalty = default, global::G.ResponseFormat? responseFormat = default, int? seed = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ICompletionsClient.CreateCompletion.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ICompletionsClient.CreateCompletion.g.verified.cs index 37272bff75..c732eab3a6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ICompletionsClient.CreateCompletion.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ICompletionsClient.CreateCompletion.g.verified.cs @@ -19,7 +19,7 @@ public partial interface ICompletionsClient /// Creates a completion for the provided prompt and parameters. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
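For illustration, here is a minimal sketch of the request body that the chat-completion parameters above map onto (store, metadata, max_completion_tokens, response_format), sent with plain HttpClient and System.Text.Json rather than the generated IChatClient. The JSON field names follow the spec text; the model name, prompt, metadata values, and environment-variable handling are placeholder assumptions.

// A minimal sketch, not the generated IChatClient: build the documented request
// body and post it to the chat completions endpoint.
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;

var body = new
{
    model = "gpt-4o-mini",
    store = true,                                   // keep the completion for distillation/evals
    metadata = new Dictionary<string, string> { ["run"] = "snapshot-demo" },
    max_completion_tokens = 256,                    // upper bound, including reasoning tokens
    response_format = new { type = "json_object" }, // JSON mode: also ask for JSON in a message
    // modalities = new[] { "text", "audio" } would additionally request audio output
    messages = new object[]
    {
        new { role = "system", content = "Reply with a single JSON object." },
        new { role = "user", content = "List three primary colors." }
    }
};

using var http = new HttpClient();
http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
    "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

var response = await http.PostAsync(
    "https://api.openai.com/v1/chat/completions",
    new StringContent(JsonSerializer.Serialize(body), Encoding.UTF8, "application/json"));

Console.WriteLine(await response.Content.ReadAsStringAsync());

Note that the system message in this sketch is what satisfies the requirement, called out in the doc comments above, to instruct the model to produce JSON whenever `response_format` is set to `{ "type": "json_object" }`.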
@@ -38,7 +38,7 @@ public partial interface ICompletionsClient /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -64,7 +64,7 @@ public partial interface ICompletionsClient /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -99,7 +99,7 @@ public partial interface ICompletionsClient /// Example: 1 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IEmbeddingsClient.CreateEmbedding.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IEmbeddingsClient.CreateEmbedding.g.verified.cs index 28ba9a7375..b2bb84e27d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IEmbeddingsClient.CreateEmbedding.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IEmbeddingsClient.CreateEmbedding.g.verified.cs @@ -23,7 +23,7 @@ public partial interface IEmbeddingsClient /// Example: The quick brown fox jumped over the lazy dog /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small /// /// @@ -35,7 +35,7 @@ public partial interface IEmbeddingsClient /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFilesClient.CreateFile.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFilesClient.CreateFile.g.verified.cs index 5a05982dd5..1d164a64ab 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFilesClient.CreateFile.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFilesClient.CreateFile.g.verified.cs @@ -9,7 +9,7 @@ public partial interface IFilesClient /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.
- /// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
+ /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// @@ -23,7 +23,7 @@ public partial interface IFilesClient /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.
- /// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
+ /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
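To make the `purpose: batch` requirement concrete, an illustrative sketch follows that writes a one-line `.jsonl` input and uploads it through the files endpoint. The per-line layout follows the batch request-input format linked above, but it is not restated in this snapshot, so treat the field names and all values as assumptions.

// Illustrative only: build a tiny Batch API input file (JSONL, purpose "batch",
// at most 50,000 requests and 200 MB) and upload it via the files endpoint.
using System;
using System.IO;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text.Json;

var line = JsonSerializer.Serialize(new
{
    custom_id = "request-1",
    method = "POST",
    url = "/v1/chat/completions",
    body = new
    {
        model = "gpt-4o-mini",
        messages = new object[] { new { role = "user", content = "Hello!" } }
    }
});
await File.WriteAllLinesAsync("requests.jsonl", new[] { line });

using var http = new HttpClient();
http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
    "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

using var form = new MultipartFormDataContent();
form.Add(new StringContent("batch"), "purpose");
form.Add(new StreamContent(File.OpenRead("requests.jsonl")), "file", "requests.jsonl");

var response = await http.PostAsync("https://api.openai.com/v1/files", form);
Console.WriteLine(await response.Content.ReadAsStringAsync()); // response carries the file id to use as input_file_id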
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFilesClient.ListFiles.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFilesClient.ListFiles.g.verified.cs index cc2eab9d69..e2c309e8e6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFilesClient.ListFiles.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFilesClient.ListFiles.g.verified.cs @@ -6,13 +6,23 @@ namespace G public partial interface IFilesClient { /// - /// Returns a list of files that belong to the user's organization. + /// Returns a list of files. /// /// + /// + /// Default Value: 10000 + /// + /// + /// Default Value: desc + /// + /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ListFilesAsync( string? purpose = default, + int? limit = default, + global::G.ListFilesOrder? order = default, + string? after = default, global::System.Threading.CancellationToken cancellationToken = default); } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFineTuningClient.CreateFineTuningJob.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFineTuningClient.CreateFineTuningJob.g.verified.cs index f8a8b50942..5abc7ac681 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFineTuningClient.CreateFineTuningJob.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IFineTuningClient.CreateFineTuningJob.g.verified.cs @@ -24,7 +24,7 @@ public partial interface IFineTuningClient /// /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini /// /// @@ -39,7 +39,7 @@ public partial interface IFineTuningClient /// The hyperparameters used for the fine-tuning job. /// /// - /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImage.g.verified.cs index 1b37a636ca..1e547983ae 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImage.g.verified.cs @@ -53,7 +53,7 @@ public partial interface IImagesClient /// Example: vivid /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImageEdit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImageEdit.g.verified.cs index 3085e2750a..a483f69261 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImageEdit.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImageEdit.g.verified.cs @@ -55,7 +55,7 @@ public partial interface IImagesClient /// Example: url /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImageVariation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImageVariation.g.verified.cs index 03b886e52f..cfd7a6351b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImageVariation.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IImagesClient.CreateImageVariation.g.verified.cs @@ -45,7 +45,7 @@ public partial interface IImagesClient /// Example: 1024x1024 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModelsClient.RetrieveModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModelsClient.RetrieveModel.g.verified.cs index ca76ad57bf..67c75ae00b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModelsClient.RetrieveModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModelsClient.RetrieveModel.g.verified.cs @@ -13,7 +13,7 @@ public partial interface IModelsClient /// /// The token to cancel the operation with /// - global::System.Threading.Tasks.Task RetrieveModelAsync( + global::System.Threading.Tasks.Task RetrieveModelAsync( string model, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModerationsClient.CreateModeration.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModerationsClient.CreateModeration.g.verified.cs index 295dbd10f4..764d6718ec 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModerationsClient.CreateModeration.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModerationsClient.CreateModeration.g.verified.cs @@ -6,7 +6,8 @@ namespace G public partial interface IModerationsClient { /// - /// Classifies if text is potentially harmful. + /// Classifies if text and/or image inputs are potentially harmful. Learn
+ /// more in the [moderation guide](/docs/guides/moderation). ///
/// /// The token to cancel the operation with @@ -16,21 +17,24 @@ public partial interface IModerationsClient global::System.Threading.CancellationToken cancellationToken = default); /// - /// Classifies if text is potentially harmful. + /// Classifies if text and/or image inputs are potentially harmful. Learn
+ /// more in the [moderation guide](/docs/guides/moderation). ///
/// - /// The input text to classify + /// Input (or inputs) to classify. Can be a single string, an array of strings, or
+ /// an array of multi-modal input objects similar to other models. /// /// - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
- /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- /// Default Value: text-moderation-latest
- /// Example: text-moderation-stable + /// The content moderation model you would like to use. Learn more in
+ /// [the moderation guide](/docs/guides/moderation), and learn about
+ /// available models [here](/docs/models#moderation).
+ /// Default Value: omni-moderation-latest
+ /// Example: omni-moderation-2024-09-26 /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task CreateModerationAsync( - global::G.OneOf> input, + global::G.OneOf, global::System.Collections.Generic.IList> input, global::G.AnyOf? model = default, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModerationsClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModerationsClient.g.verified.cs index 9e6725c81a..82de270493 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModerationsClient.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IModerationsClient.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Given a input text, outputs if the model classifies it as potentially harmful.
+ /// Given text and/or image inputs, classifies if those inputs are potentially harmful.
/// If no httpClient is provided, a new one will be created.
/// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. ///
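Since the moderation surface now accepts multi-modal input and defaults to the omni models, a small illustrative request is sketched below. It bypasses the generated ModerationsClient; the multi-modal object shape is how omni moderation input is documented elsewhere but is not restated in this snapshot, so treat it, and the placeholder text and image URL, as assumptions.

// A minimal sketch, not the generated ModerationsClient: classify one text item
// and one image with an omni moderation model.
using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;

var body = new
{
    model = "omni-moderation-latest",
    input = new object[]
    {
        new { type = "text", text = "...text to classify..." },
        new { type = "image_url", image_url = new { url = "https://example.com/image.png" } }
    }
};

using var http = new HttpClient();
http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
    "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

var response = await http.PostAsync(
    "https://api.openai.com/v1/moderations",
    new StringContent(JsonSerializer.Serialize(body), Encoding.UTF8, "application/json"));

Console.WriteLine(await response.Content.ReadAsStringAsync());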
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IOpenAiClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IOpenAiClient.g.verified.cs index 156f44dcfa..b84bb44ee6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IOpenAiClient.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IOpenAiClient.g.verified.cs @@ -94,7 +94,7 @@ public partial interface IOpenAiClient : global::System.IDisposable public ModelsClient Models { get; } /// - /// Given a input text, outputs if the model classifies it as potentially harmful. + /// Given text and/or image inputs, classifies if those inputs are potentially harmful. /// public ModerationsClient Moderations { get; } @@ -106,13 +106,18 @@ public partial interface IOpenAiClient : global::System.IDisposable /// /// /// - public VectorStoresClient VectorStores { get; } + public UsageClient Usage { get; } /// /// /// public InvitesClient Invites { get; } + /// + /// + /// + public ProjectsClient Projects { get; } + /// /// /// @@ -121,7 +126,7 @@ public partial interface IOpenAiClient : global::System.IDisposable /// /// /// - public ProjectsClient Projects { get; } + public VectorStoresClient VectorStores { get; } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ListProjectRateLimits.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ListProjectRateLimits.g.verified.cs new file mode 100644 index 0000000000..bfdce37006 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ListProjectRateLimits.g.verified.cs @@ -0,0 +1,26 @@ +//HintName: G.IProjectsClient.ListProjectRateLimits.g.cs +#nullable enable + +namespace G +{ + public partial interface IProjectsClient + { + /// + /// Returns the rate limits per model for a project. + /// + /// + /// + /// Default Value: 100 + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task ListProjectRateLimitsAsync( + string projectId, + int? limit = default, + string? after = default, + string? before = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ModifyProject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ModifyProject.g.verified.cs index 6004bae4ce..618fbedc84 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ModifyProject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ModifyProject.g.verified.cs @@ -8,22 +8,26 @@ public partial interface IProjectsClient /// /// Modifies a project in the organization. /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, global::G.ProjectUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a project in the organization. /// + /// /// /// The updated name of the project, this name appears in reports. 
/// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, string name, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ModifyProjectUser.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ModifyProjectUser.g.verified.cs index b0a947b701..a23110ac65 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ModifyProjectUser.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.ModifyProjectUser.g.verified.cs @@ -8,22 +8,30 @@ public partial interface IProjectsClient /// /// Modifies a user's role in the project. /// + /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::G.ProjectUserUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a user's role in the project. /// + /// + /// /// /// `owner` or `member` /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::G.ProjectUserUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.UpdateProjectRateLimits.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.UpdateProjectRateLimits.g.verified.cs new file mode 100644 index 0000000000..347e85901d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IProjectsClient.UpdateProjectRateLimits.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.IProjectsClient.UpdateProjectRateLimits.g.cs +#nullable enable + +namespace G +{ + public partial interface IProjectsClient + { + /// + /// Updates a project rate limit. + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UpdateProjectRateLimitsAsync( + string projectId, + string rateLimitId, + global::G.ProjectRateLimitUpdateRequest request, + global::System.Threading.CancellationToken cancellationToken = default); + + /// + /// Updates a project rate limit. + /// + /// + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only relevant for certain models. + /// + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + /// + /// The maximum requests per day. Only relevant for certain models. + /// + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UpdateProjectRateLimitsAsync( + string projectId, + string rateLimitId, + int? maxRequestsPer1Minute = default, + int? maxTokensPer1Minute = default, + int? maxImagesPer1Minute = default, + int? maxAudioMegabytesPer1Minute = default, + int? maxRequestsPer1Day = default, + int? 
batch1DayMaxInputTokens = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUploadsClient.CreateUpload.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUploadsClient.CreateUpload.g.verified.cs index 127b8ce96a..40bea117be 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUploadsClient.CreateUpload.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUploadsClient.CreateUpload.g.verified.cs @@ -9,7 +9,7 @@ public partial interface IUploadsClient /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// @@ -23,7 +23,7 @@ public partial interface IUploadsClient /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageAudioSpeeches.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageAudioSpeeches.g.verified.cs new file mode 100644 index 0000000000..c171fdadcc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageAudioSpeeches.g.verified.cs @@ -0,0 +1,38 @@ +//HintName: G.IUsageClient.UsageAudioSpeeches.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get audio speeches usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageAudioSpeechesAsync( + int startTime, + int? endTime = default, + global::G.UsageAudioSpeechesBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageAudioTranscriptions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageAudioTranscriptions.g.verified.cs new file mode 100644 index 0000000000..2626dd8499 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageAudioTranscriptions.g.verified.cs @@ -0,0 +1,38 @@ +//HintName: G.IUsageClient.UsageAudioTranscriptions.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get audio transcriptions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageAudioTranscriptionsAsync( + int startTime, + int? endTime = default, + global::G.UsageAudioTranscriptionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCodeInterpreterSessions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCodeInterpreterSessions.g.verified.cs new file mode 100644 index 0000000000..c6aaf40e5d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCodeInterpreterSessions.g.verified.cs @@ -0,0 +1,32 @@ +//HintName: G.IUsageClient.UsageCodeInterpreterSessions.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get code interpreter sessions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageCodeInterpreterSessionsAsync( + int startTime, + int? endTime = default, + global::G.UsageCodeInterpreterSessionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCompletions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCompletions.g.verified.cs new file mode 100644 index 0000000000..547acd02b9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCompletions.g.verified.cs @@ -0,0 +1,40 @@ +//HintName: G.IUsageClient.UsageCompletions.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get completions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageCompletionsAsync( + int startTime, + int? endTime = default, + global::G.UsageCompletionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + bool? batch = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCosts.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCosts.g.verified.cs new file mode 100644 index 0000000000..96ebd4b506 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageCosts.g.verified.cs @@ -0,0 +1,34 @@ +//HintName: G.IUsageClient.UsageCosts.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get costs details for the organization. 
+ /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// Default Value: 7 + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageCostsAsync( + int startTime, + int? endTime = default, + global::G.UsageCostsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageEmbeddings.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageEmbeddings.g.verified.cs new file mode 100644 index 0000000000..4445eddb06 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageEmbeddings.g.verified.cs @@ -0,0 +1,38 @@ +//HintName: G.IUsageClient.UsageEmbeddings.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get embeddings usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageEmbeddingsAsync( + int startTime, + int? endTime = default, + global::G.UsageEmbeddingsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageImages.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageImages.g.verified.cs new file mode 100644 index 0000000000..687af2a335 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageImages.g.verified.cs @@ -0,0 +1,42 @@ +//HintName: G.IUsageClient.UsageImages.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get images usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageImagesAsync( + int startTime, + int? endTime = default, + global::G.UsageImagesBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? sources = default, + global::System.Collections.Generic.IList? sizes = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageModerations.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageModerations.g.verified.cs new file mode 100644 index 0000000000..f8afc70d6a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageModerations.g.verified.cs @@ -0,0 +1,38 @@ +//HintName: G.IUsageClient.UsageModerations.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get moderations usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageModerationsAsync( + int startTime, + int? endTime = default, + global::G.UsageModerationsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageVectorStores.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageVectorStores.g.verified.cs new file mode 100644 index 0000000000..0bd84589db --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.UsageVectorStores.g.verified.cs @@ -0,0 +1,32 @@ +//HintName: G.IUsageClient.UsageVectorStores.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get vector stores usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageVectorStoresAsync( + int startTime, + int? endTime = default, + global::G.UsageVectorStoresBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.g.verified.cs new file mode 100644 index 0000000000..51ce3e3b06 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsageClient.g.verified.cs @@ -0,0 +1,41 @@ +//HintName: G.IUsageClient.g.cs + +#nullable enable + +namespace G +{ + /// + /// If no httpClient is provided, a new one will be created.
+ /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + ///
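A minimal calling sketch for the usage surface introduced above, assuming some concrete implementation of G.IUsageClient is in hand; the method and parameter names come from the generated signatures in this diff, while the helper name and the seven-day window are purely illustrative, and bucket_width is left at its documented default of 1d.

using System;
using System.Threading;
using System.Threading.Tasks;

static class UsageClientSketch
{
    // Query completions usage for the last seven days through the new IUsageClient surface.
    // The response type is not awaited into a variable because its generic argument is not visible in this diff.
    public static async Task QueryCompletionsUsageAsync(G.IUsageClient client, CancellationToken ct = default)
    {
        // start_time is a Unix timestamp in seconds, matching the generated `int startTime` parameter.
        var startTime = (int)DateTimeOffset.UtcNow.AddDays(-7).ToUnixTimeSeconds();

        await client.UsageCompletionsAsync(
            startTime: startTime,
            limit: 7,                  // roughly one daily bucket per day of the window
            cancellationToken: ct);
    }
}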
+ public partial interface IUsageClient : global::System.IDisposable + { + /// + /// The HttpClient instance. + /// + public global::System.Net.Http.HttpClient HttpClient { get; } + + /// + /// The base URL for the API. + /// + public System.Uri? BaseUri { get; } + + /// + /// The authorizations to use for the requests. + /// + public global::System.Collections.Generic.List Authorizations { get; } + + /// + /// Gets or sets a value indicating whether the response content should be read as a string. + /// True by default in debug builds, false otherwise. + /// + public bool ReadResponseAsString { get; set; } + + /// + /// + /// + global::Newtonsoft.Json.JsonSerializerSettings JsonSerializerOptions { get; set; } + + + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsersClient.ModifyUser.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsersClient.ModifyUser.g.verified.cs index 9052ca9347..fa2bdf58e5 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsersClient.ModifyUser.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IUsersClient.ModifyUser.g.verified.cs @@ -8,22 +8,26 @@ public partial interface IUsersClient /// /// Modifies a user's role in the organization. /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::G.UserRoleUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a user's role in the organization. /// + /// /// /// `owner` or `reader` /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::G.UserRoleUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IVectorStoresClient.CreateVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IVectorStoresClient.CreateVectorStore.g.verified.cs index 70b75f7a53..d856c0d320 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IVectorStoresClient.CreateVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IVectorStoresClient.CreateVectorStore.g.verified.cs @@ -31,7 +31,7 @@ public partial interface IVectorStoresClient /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IVectorStoresClient.ModifyVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IVectorStoresClient.ModifyVectorStore.g.verified.cs index e4af066157..57db10482a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IVectorStoresClient.ModifyVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.IVectorStoresClient.ModifyVectorStore.g.verified.cs @@ -28,7 +28,7 @@ public partial interface IVectorStoresClient /// The expiration policy for a vector store. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImage.g.verified.cs index 7119a2fa61..8d7d1d00d7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImage.g.verified.cs @@ -196,7 +196,7 @@ partial void ProcessCreateImageResponseContent( /// Example: vivid /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImageEdit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImageEdit.g.verified.cs index 087df9d445..a65b8bbf85 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImageEdit.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImageEdit.g.verified.cs @@ -238,7 +238,7 @@ partial void ProcessCreateImageEditResponseContent( /// Example: url /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImageVariation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImageVariation.g.verified.cs index f931ef6158..977a210116 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImageVariation.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ImagesClient.CreateImageVariation.g.verified.cs @@ -218,7 +218,7 @@ partial void ProcessCreateImageVariationResponseContent( /// Example: 1024x1024 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObject.g.verified.cs index 27756c8a6c..6bf04183a2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObject.g.verified.cs @@ -40,7 +40,7 @@ public sealed partial class AssistantObject public string? Description { get; set; } = default!; /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::Newtonsoft.Json.JsonProperty("model", Required = global::Newtonsoft.Json.Required.Always)] public string Model { get; set; } = default!; @@ -52,7 +52,8 @@ public sealed partial class AssistantObject public string? Instructions { get; set; } = default!; /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] ///
[global::Newtonsoft.Json.JsonProperty("tools", Required = global::Newtonsoft.Json.Required.Always)] public global::System.Collections.Generic.IList Tools { get; set; } = default!; @@ -64,7 +65,7 @@ public sealed partial class AssistantObject public global::G.AssistantObjectToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata", Required = global::Newtonsoft.Json.Required.Always)] public object? Metadata { get; set; } = default!; @@ -89,9 +90,9 @@ public sealed partial class AssistantObject public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
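An illustrative sketch (not generated code) of the two response_format payloads described in the comment above, built with Newtonsoft.Json.Linq since these snapshots target Newtonsoft; the schema name and body are placeholders, and the mapping onto the generated AssistantsApiResponseFormatOption type is not shown in this hunk.

using Newtonsoft.Json.Linq;

// JSON mode: the model is constrained to emit valid JSON, but per the warning above
// you must still ask for JSON explicitly in a system or user message.
var jsonMode = new JObject { ["type"] = "json_object" };

// Structured Outputs: the model is constrained to the supplied JSON schema.
// "demo_schema" and the schema body below are placeholders for illustration only.
var structuredOutputs = new JObject
{
    ["type"] = "json_schema",
    ["json_schema"] = new JObject
    {
        ["name"] = "demo_schema",
        ["schema"] = new JObject
        {
            ["type"] = "object",
            ["properties"] = new JObject
            {
                ["answer"] = new JObject { ["type"] = "string" }
            }
        }
    }
};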
[global::Newtonsoft.Json.JsonProperty("response_format")] @@ -122,19 +123,20 @@ public sealed partial class AssistantObject /// The description of the assistant. The maximum length is 512 characters. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
@@ -148,9 +150,9 @@ public sealed partial class AssistantObject /// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// public AssistantObject( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObjectMetadata.g.verified.cs index c2621d635c..98e5ca76f2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class AssistantObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObjectToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObjectToolResourcesCodeInterpreter.g.verified.cs index 01a0fbbd27..82cd768061 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObjectToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantObjectToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class AssistantObjectToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::Newtonsoft.Json.JsonProperty("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class AssistantObjectToolResourcesCodeInterpreter /// Initializes a new instance of the class. /// /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// public AssistantObjectToolResourcesCodeInterpreter( global::System.Collections.Generic.IList? fileIds) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEvent.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEvent.g.verified.cs index 4a3f9be760..3caffd0c6d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEvent.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEvent.g.verified.cs @@ -30,7 +30,7 @@ namespace G public global::G.AssistantStreamEventDiscriminatorEvent? Event { get; } /// - /// Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + /// Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout. /// #if NET6_0_OR_GREATER public global::G.ErrorEvent? Error { get; init; } @@ -485,7 +485,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant13? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant14? ThreadRunStepCreated { get; init; } @@ -520,7 +520,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant14? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant15? ThreadRunStepInProgress { get; init; } @@ -555,7 +555,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant15? value) } /// - /// Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + /// Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant16? ThreadRunStepDelta { get; init; } @@ -590,7 +590,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant16? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant17? ThreadRunStepCompleted { get; init; } @@ -625,7 +625,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant17? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant18? ThreadRunStepFailed { get; init; } @@ -660,7 +660,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant18? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant19? ThreadRunStepCancelled { get; init; } @@ -695,7 +695,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant19? 
value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) expires. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant20? ThreadRunStepExpired { get; init; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant14.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant14.g.verified.cs index c481ab90d9..6fb821bf7d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant14.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant14.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. /// public sealed partial class AssistantStreamEventVariant14 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant15.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant15.g.verified.cs index b3e0e1e12d..68332d16ae 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant15.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant15.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. /// public sealed partial class AssistantStreamEventVariant15 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant16.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant16.g.verified.cs index b1e8acb8eb..93dc309127 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant16.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant16.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + /// Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. /// public sealed partial class AssistantStreamEventVariant16 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant17.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant17.g.verified.cs index a526ad8781..fde07f669d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant17.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant17.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. 
/// public sealed partial class AssistantStreamEventVariant17 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant18.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant18.g.verified.cs index 597e3b62b7..ddbeca892c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant18.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant18.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. /// public sealed partial class AssistantStreamEventVariant18 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant19.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant19.g.verified.cs index d1e1ea79ec..c4be08f829 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant19.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant19.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. /// public sealed partial class AssistantStreamEventVariant19 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant20.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant20.g.verified.cs index 9e093d4105..c0b46b333d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant20.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant20.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) expires. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. /// public sealed partial class AssistantStreamEventVariant20 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant3.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant3.g.verified.cs index 32fa126f35..c73445f05a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant3.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantStreamEventVariant3.g.verified.cs @@ -9,6 +9,12 @@ namespace G /// public sealed partial class AssistantStreamEventVariant3 { + /// + /// Whether to enable input audio transcription. + /// + [global::Newtonsoft.Json.JsonProperty("enabled")] + public bool? Enabled { get; set; } + /// /// /// @@ -30,15 +36,20 @@ public sealed partial class AssistantStreamEventVariant3 /// /// Initializes a new instance of the class. /// + /// + /// Whether to enable input audio transcription. + /// /// /// /// Represents a thread that contains [messages](/docs/api-reference/messages). 
/// public AssistantStreamEventVariant3( global::G.ThreadObject data, + bool? enabled, global::G.AssistantStreamEventVariant3Event @event) { this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.Enabled = enabled; this.Event = @event; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantToolsFileSearchFileSearch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantToolsFileSearchFileSearch.g.verified.cs index 73e20bd23f..83b146151b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantToolsFileSearchFileSearch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantToolsFileSearchFileSearch.g.verified.cs @@ -11,11 +11,18 @@ public sealed partial class AssistantToolsFileSearchFileSearch { /// /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.
- /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. ///
[global::Newtonsoft.Json.JsonProperty("max_num_results")] public int? MaxNumResults { get; set; } + /// + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
+ /// See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + ///
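A short sketch (not part of the snapshot) of the two-argument constructor this hunk introduces; passing null for rankingOptions relies on the documented fallback to the `auto` ranker with a score_threshold of 0, and 10 is simply a value inside the documented 1-50 range.

// Sketch only: construct the file_search tool options using the constructor added in this diff.
var fileSearchOptions = new G.AssistantToolsFileSearchFileSearch(
    maxNumResults: 10,      // must be between 1 and 50 inclusive per the docs above
    rankingOptions: null);  // null => `auto` ranker with a score_threshold of 0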
+ [global::Newtonsoft.Json.JsonProperty("ranking_options")] + public global::G.FileSearchRankingOptions? RankingOptions { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -27,12 +34,18 @@ public sealed partial class AssistantToolsFileSearchFileSearch /// /// /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.
- /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + /// + /// + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
+ /// See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. /// public AssistantToolsFileSearchFileSearch( - int? maxNumResults) + int? maxNumResults, + global::G.FileSearchRankingOptions? rankingOptions) { this.MaxNumResults = maxNumResults; + this.RankingOptions = rankingOptions; } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantsApiResponseFormatOption.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantsApiResponseFormatOption.g.verified.cs index 6e03692459..573a4c7ee3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantsApiResponseFormatOption.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AssistantsApiResponseFormatOption.g.verified.cs @@ -6,9 +6,9 @@ namespace G { /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
public readonly partial struct AssistantsApiResponseFormatOption : global::System.IEquatable diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranscriptionRequestResponseFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AudioResponseFormat.g.verified.cs similarity index 57% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranscriptionRequestResponseFormat.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AudioResponseFormat.g.verified.cs index 3c7e4ee22e..978a372dc1 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranscriptionRequestResponseFormat.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AudioResponseFormat.g.verified.cs @@ -1,15 +1,15 @@ -//HintName: G.Models.CreateTranscriptionRequestResponseFormat.g.cs +//HintName: G.Models.AudioResponseFormat.g.cs #nullable enable namespace G { /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json ///
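A tiny usage sketch (not generated code) of the renamed enum and the extension helpers generated further down in this file:

using G;

// Round-trip the wire value through the generated helpers.
AudioResponseFormat? parsed = AudioResponseFormatExtensions.ToEnum("verbose_json"); // => AudioResponseFormat.VerboseJson
string wire = AudioResponseFormat.Srt.ToValueString();                              // => "srt"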
[global::System.Runtime.Serialization.DataContract] - public enum CreateTranscriptionRequestResponseFormat + public enum AudioResponseFormat { /// /// `json`, `text`, `srt`, `verbose_json`, or `vtt`. @@ -41,35 +41,35 @@ public enum CreateTranscriptionRequestResponseFormat /// /// Enum extensions to do fast conversions without the reflection. /// - public static class CreateTranscriptionRequestResponseFormatExtensions + public static class AudioResponseFormatExtensions { /// /// Converts an enum to a string. /// - public static string ToValueString(this CreateTranscriptionRequestResponseFormat value) + public static string ToValueString(this AudioResponseFormat value) { return value switch { - CreateTranscriptionRequestResponseFormat.Json => "json", - CreateTranscriptionRequestResponseFormat.Text => "text", - CreateTranscriptionRequestResponseFormat.Srt => "srt", - CreateTranscriptionRequestResponseFormat.VerboseJson => "verbose_json", - CreateTranscriptionRequestResponseFormat.Vtt => "vtt", + AudioResponseFormat.Json => "json", + AudioResponseFormat.Text => "text", + AudioResponseFormat.Srt => "srt", + AudioResponseFormat.VerboseJson => "verbose_json", + AudioResponseFormat.Vtt => "vtt", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } /// /// Converts an string to a enum. /// - public static CreateTranscriptionRequestResponseFormat? ToEnum(string value) + public static AudioResponseFormat? ToEnum(string value) { return value switch { - "json" => CreateTranscriptionRequestResponseFormat.Json, - "text" => CreateTranscriptionRequestResponseFormat.Text, - "srt" => CreateTranscriptionRequestResponseFormat.Srt, - "verbose_json" => CreateTranscriptionRequestResponseFormat.VerboseJson, - "vtt" => CreateTranscriptionRequestResponseFormat.Vtt, + "json" => AudioResponseFormat.Json, + "text" => AudioResponseFormat.Text, + "srt" => AudioResponseFormat.Srt, + "verbose_json" => AudioResponseFormat.VerboseJson, + "vtt" => AudioResponseFormat.Vtt, _ => null, }; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLog.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLog.g.verified.cs index dc2c67332a..f52c04d8c4 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLog.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLog.g.verified.cs @@ -111,6 +111,18 @@ public sealed partial class AuditLog [global::Newtonsoft.Json.JsonProperty("project.archived")] public global::G.AuditLogProjectArchived? ProjectArchived { get; set; } + /// + /// The details for events with this `type`. + /// + [global::Newtonsoft.Json.JsonProperty("rate_limit.updated")] + public global::G.AuditLogRateLimitUpdated? RateLimitUpdated { get; set; } + + /// + /// The details for events with this `type`. + /// + [global::Newtonsoft.Json.JsonProperty("rate_limit.deleted")] + public global::G.AuditLogRateLimitDeleted? RateLimitDeleted { get; set; } + /// /// The details for events with this `type`. /// @@ -207,6 +219,12 @@ public sealed partial class AuditLog /// /// The details for events with this `type`. /// + /// + /// The details for events with this `type`. + /// + /// + /// The details for events with this `type`. + /// /// /// The details for events with this `type`. /// @@ -243,6 +261,8 @@ public AuditLog( global::G.AuditLogProjectCreated? projectCreated, global::G.AuditLogProjectUpdated? 
projectUpdated, global::G.AuditLogProjectArchived? projectArchived, + global::G.AuditLogRateLimitUpdated? rateLimitUpdated, + global::G.AuditLogRateLimitDeleted? rateLimitDeleted, global::G.AuditLogServiceAccountCreated? serviceAccountCreated, global::G.AuditLogServiceAccountUpdated? serviceAccountUpdated, global::G.AuditLogServiceAccountDeleted? serviceAccountDeleted, @@ -267,6 +287,8 @@ public AuditLog( this.ProjectCreated = projectCreated; this.ProjectUpdated = projectUpdated; this.ProjectArchived = projectArchived; + this.RateLimitUpdated = rateLimitUpdated; + this.RateLimitDeleted = rateLimitDeleted; this.ServiceAccountCreated = serviceAccountCreated; this.ServiceAccountUpdated = serviceAccountUpdated; this.ServiceAccountDeleted = serviceAccountDeleted; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogEventType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogEventType.g.verified.cs index 27b6e2dfca..c439b4fb39 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogEventType.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogEventType.g.verified.cs @@ -98,6 +98,16 @@ public enum AuditLogEventType /// /// /// + [global::System.Runtime.Serialization.EnumMember(Value="rate_limit.updated")] + RateLimitUpdated, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="rate_limit.deleted")] + RateLimitDeleted, + /// + /// + /// [global::System.Runtime.Serialization.EnumMember(Value="user.added")] UserAdded, /// @@ -141,6 +151,8 @@ public static string ToValueString(this AuditLogEventType value) AuditLogEventType.ServiceAccountCreated => "service_account.created", AuditLogEventType.ServiceAccountUpdated => "service_account.updated", AuditLogEventType.ServiceAccountDeleted => "service_account.deleted", + AuditLogEventType.RateLimitUpdated => "rate_limit.updated", + AuditLogEventType.RateLimitDeleted => "rate_limit.deleted", AuditLogEventType.UserAdded => "user.added", AuditLogEventType.UserUpdated => "user.updated", AuditLogEventType.UserDeleted => "user.deleted", @@ -171,6 +183,8 @@ public static string ToValueString(this AuditLogEventType value) "service_account.created" => AuditLogEventType.ServiceAccountCreated, "service_account.updated" => AuditLogEventType.ServiceAccountUpdated, "service_account.deleted" => AuditLogEventType.ServiceAccountDeleted, + "rate_limit.updated" => AuditLogEventType.RateLimitUpdated, + "rate_limit.deleted" => AuditLogEventType.RateLimitDeleted, "user.added" => AuditLogEventType.UserAdded, "user.updated" => AuditLogEventType.UserUpdated, "user.deleted" => AuditLogEventType.UserDeleted, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitDeleted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitDeleted.Json.g.verified.cs new file mode 100644 index 0000000000..ca814c6dd2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitDeleted.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.AuditLogRateLimitDeleted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class AuditLogRateLimitDeleted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.AuditLogRateLimitDeleted? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitDeleted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitDeleted.g.verified.cs new file mode 100644 index 0000000000..5ae490c52a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitDeleted.g.verified.cs @@ -0,0 +1,43 @@ +//HintName: G.Models.AuditLogRateLimitDeleted.g.cs + +#nullable enable + +namespace G +{ + /// + /// The details for events with this `type`. + /// + public sealed partial class AuditLogRateLimitDeleted + { + /// + /// The rate limit ID + /// + [global::Newtonsoft.Json.JsonProperty("id")] + public string? Id { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The rate limit ID + /// + public AuditLogRateLimitDeleted( + string? id) + { + this.Id = id; + } + + /// + /// Initializes a new instance of the class. + /// + public AuditLogRateLimitDeleted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdated.Json.g.verified.cs new file mode 100644 index 0000000000..5bc8f0c48e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdated.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.AuditLogRateLimitUpdated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class AuditLogRateLimitUpdated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.AuditLogRateLimitUpdated? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdated.g.verified.cs new file mode 100644 index 0000000000..b45114b87c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdated.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.AuditLogRateLimitUpdated.g.cs + +#nullable enable + +namespace G +{ + /// + /// The details for events with this `type`. + /// + public sealed partial class AuditLogRateLimitUpdated + { + /// + /// The rate limit ID + /// + [global::Newtonsoft.Json.JsonProperty("id")] + public string? Id { get; set; } + + /// + /// The payload used to update the rate limits. + /// + [global::Newtonsoft.Json.JsonProperty("changes_requested")] + public global::G.AuditLogRateLimitUpdatedChangesRequested? ChangesRequested { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The rate limit ID + /// + /// + /// The payload used to update the rate limits. + /// + public AuditLogRateLimitUpdated( + string? id, + global::G.AuditLogRateLimitUpdatedChangesRequested? 
changesRequested) + { + this.Id = id; + this.ChangesRequested = changesRequested; + } + + /// + /// Initializes a new instance of the class. + /// + public AuditLogRateLimitUpdated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.Json.g.verified.cs new file mode 100644 index 0000000000..47863881d0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.AuditLogRateLimitUpdatedChangesRequested.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class AuditLogRateLimitUpdatedChangesRequested + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.AuditLogRateLimitUpdatedChangesRequested? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.g.verified.cs new file mode 100644 index 0000000000..b2c0bcfda9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.g.verified.cs @@ -0,0 +1,98 @@ +//HintName: G.Models.AuditLogRateLimitUpdatedChangesRequested.g.cs + +#nullable enable + +namespace G +{ + /// + /// The payload used to update the rate limits. + /// + public sealed partial class AuditLogRateLimitUpdatedChangesRequested + { + /// + /// The maximum requests per minute. + /// + [global::Newtonsoft.Json.JsonProperty("max_requests_per_1_minute")] + public int? MaxRequestsPer1Minute { get; set; } + + /// + /// The maximum tokens per minute. + /// + [global::Newtonsoft.Json.JsonProperty("max_tokens_per_1_minute")] + public int? MaxTokensPer1Minute { get; set; } + + /// + /// The maximum images per minute. Only relevant for certain models. + /// + [global::Newtonsoft.Json.JsonProperty("max_images_per_1_minute")] + public int? MaxImagesPer1Minute { get; set; } + + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + [global::Newtonsoft.Json.JsonProperty("max_audio_megabytes_per_1_minute")] + public int? MaxAudioMegabytesPer1Minute { get; set; } + + /// + /// The maximum requests per day. Only relevant for certain models. + /// + [global::Newtonsoft.Json.JsonProperty("max_requests_per_1_day")] + public int? MaxRequestsPer1Day { get; set; } + + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + [global::Newtonsoft.Json.JsonProperty("batch_1_day_max_input_tokens")] + public int? Batch1DayMaxInputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only relevant for certain models. + /// + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + /// + /// The maximum requests per day. Only relevant for certain models. + /// + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + public AuditLogRateLimitUpdatedChangesRequested( + int? maxRequestsPer1Minute, + int? maxTokensPer1Minute, + int? maxImagesPer1Minute, + int? maxAudioMegabytesPer1Minute, + int? maxRequestsPer1Day, + int? 
batch1DayMaxInputTokens) + { + this.MaxRequestsPer1Minute = maxRequestsPer1Minute; + this.MaxTokensPer1Minute = maxTokensPer1Minute; + this.MaxImagesPer1Minute = maxImagesPer1Minute; + this.MaxAudioMegabytesPer1Minute = maxAudioMegabytesPer1Minute; + this.MaxRequestsPer1Day = maxRequestsPer1Day; + this.Batch1DayMaxInputTokens = batch1DayMaxInputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public AuditLogRateLimitUpdatedChangesRequested() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Batch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Batch.g.verified.cs index 50f1729ec5..6c4054b663 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Batch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Batch.g.verified.cs @@ -124,7 +124,7 @@ public sealed partial class Batch public global::G.BatchRequestCounts? RequestCounts { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -192,7 +192,7 @@ public sealed partial class Batch /// The request counts for different statuses within the batch. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public Batch( string id, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.BatchMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.BatchMetadata.g.verified.cs index 31a0f61e04..86b8e0d459 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.BatchMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.BatchMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class BatchMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionModalitie.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionModalitie.g.verified.cs new file mode 100644 index 0000000000..0573bb77ea --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionModalitie.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.ChatCompletionModalitie.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum ChatCompletionModalitie + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="audio")] + Audio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionModalitieExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ChatCompletionModalitie value) + { + return value switch + { + ChatCompletionModalitie.Text => "text", + ChatCompletionModalitie.Audio => "audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionModalitie? ToEnum(string value) + { + return value switch + { + "text" => ChatCompletionModalitie.Text, + "audio" => ChatCompletionModalitie.Audio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessage.g.verified.cs index d86302b061..e610f63b61 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessage.g.verified.cs @@ -35,6 +35,13 @@ public sealed partial class ChatCompletionRequestAssistantMessage [global::Newtonsoft.Json.JsonProperty("name")] public string? Name { get; set; } + /// + /// Data about a previous audio response from the model.
+ /// [Learn more](/docs/guides/audio). + ///
+ [global::Newtonsoft.Json.JsonProperty("audio")] + public global::G.ChatCompletionRequestAssistantMessageAudio? Audio { get; set; } + /// /// The tool calls generated by the model, such as function calls. /// @@ -69,6 +76,10 @@ public sealed partial class ChatCompletionRequestAssistantMessage /// /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. /// + /// + /// Data about a previous audio response from the model.
+ /// [Learn more](/docs/guides/audio). + /// /// /// The tool calls generated by the model, such as function calls. /// @@ -77,12 +88,14 @@ public ChatCompletionRequestAssistantMessage( string? refusal, global::G.ChatCompletionRequestAssistantMessageRole role, string? name, + global::G.ChatCompletionRequestAssistantMessageAudio? audio, global::System.Collections.Generic.IList? toolCalls) { this.Content = content; this.Refusal = refusal; this.Role = role; this.Name = name; + this.Audio = audio; this.ToolCalls = toolCalls; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.Json.g.verified.cs new file mode 100644 index 0000000000..46ba921a63 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.ChatCompletionRequestAssistantMessageAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ChatCompletionRequestAssistantMessageAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ChatCompletionRequestAssistantMessageAudio? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.g.verified.cs new file mode 100644 index 0000000000..09def51775 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.g.verified.cs @@ -0,0 +1,44 @@ +//HintName: G.Models.ChatCompletionRequestAssistantMessageAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// Data about a previous audio response from the model.
+ /// [Learn more](/docs/guides/audio). + ///
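// Illustrative usage sketch, assuming the generated `G` types in this diff: the
// single-parameter constructor used here is the one declared just below, and the
// ToJson/FromJson helpers come from the Newtonsoft partial shown above. The id
// value is hypothetical.
public static class AssistantAudioReferenceSketch
{
    public static string RoundTrip()
    {
        // Reference a previous audio response by its id; this is what would be
        // assigned to the new ChatCompletionRequestAssistantMessage.Audio property added above.
        var audioRef = new global::G.ChatCompletionRequestAssistantMessageAudio(id: "audio_abc123");

        // Serialize with the generated Newtonsoft helper and read it back.
        string json = audioRef.ToJson();
        var restored = global::G.ChatCompletionRequestAssistantMessageAudio.FromJson(json);
        return restored?.Id ?? string.Empty;
    }
}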
+ public sealed partial class ChatCompletionRequestAssistantMessageAudio + { + /// + /// Unique identifier for a previous audio response from the model. + /// + [global::Newtonsoft.Json.JsonProperty("id", Required = global::Newtonsoft.Json.Required.Always)] + public string Id { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Unique identifier for a previous audio response from the model. + /// + public ChatCompletionRequestAssistantMessageAudio( + string id) + { + this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); + } + + /// + /// Initializes a new instance of the class. + /// + public ChatCompletionRequestAssistantMessageAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageContentPart.g.verified.cs index abe3683784..32db3c27d6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestAssistantMessageContentPart.g.verified.cs @@ -16,7 +16,7 @@ namespace G public global::G.ChatCompletionRequestAssistantMessageContentPartDiscriminatorType? Type { get; } /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartText? Text { get; init; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.Json.g.verified.cs new file mode 100644 index 0000000000..df98e701ad --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ChatCompletionRequestMessageContentPartAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ChatCompletionRequestMessageContentPartAudio? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.g.verified.cs new file mode 100644 index 0000000000..2ddea3d6dc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// Learn about [audio inputs](/docs/guides/audio). + /// + public sealed partial class ChatCompletionRequestMessageContentPartAudio + { + /// + /// The type of the content part. Always `input_audio`. 
+ /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.ChatCompletionRequestMessageContentPartAudioType Type { get; set; } + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("input_audio", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.ChatCompletionRequestMessageContentPartAudioInputAudio InputAudio { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the content part. Always `input_audio`. + /// + /// + public ChatCompletionRequestMessageContentPartAudio( + global::G.ChatCompletionRequestMessageContentPartAudioInputAudio inputAudio, + global::G.ChatCompletionRequestMessageContentPartAudioType type) + { + this.InputAudio = inputAudio ?? throw new global::System.ArgumentNullException(nameof(inputAudio)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public ChatCompletionRequestMessageContentPartAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.Json.g.verified.cs new file mode 100644 index 0000000000..64885f85a9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ChatCompletionRequestMessageContentPartAudioInputAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ChatCompletionRequestMessageContentPartAudioInputAudio? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.verified.cs new file mode 100644 index 0000000000..42eacb0b00 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class ChatCompletionRequestMessageContentPartAudioInputAudio + { + /// + /// Base64 encoded audio data. + /// + [global::Newtonsoft.Json.JsonProperty("data", Required = global::Newtonsoft.Json.Required.Always)] + public string Data { get; set; } = default!; + + /// + /// The format of the encoded audio data. Currently supports "wav" and "mp3". + /// + [global::Newtonsoft.Json.JsonProperty("format", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat Format { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. 
+ /// + /// + /// Base64 encoded audio data. + /// + /// + /// The format of the encoded audio data. Currently supports "wav" and "mp3". + /// + public ChatCompletionRequestMessageContentPartAudioInputAudio( + string data, + global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat format) + { + this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.Format = format; + } + + /// + /// Initializes a new instance of the class. + /// + public ChatCompletionRequestMessageContentPartAudioInputAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs new file mode 100644 index 0000000000..ebcefdceb4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs + +#nullable enable + +namespace G +{ + /// + /// The format of the encoded audio data. Currently supports "wav" and "mp3". + /// + [global::System.Runtime.Serialization.DataContract] + public enum ChatCompletionRequestMessageContentPartAudioInputAudioFormat + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="wav")] + Wav, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="mp3")] + Mp3, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ChatCompletionRequestMessageContentPartAudioInputAudioFormat value) + { + return value switch + { + ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Wav => "wav", + ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Mp3 => "mp3", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionRequestMessageContentPartAudioInputAudioFormat? ToEnum(string value) + { + return value switch + { + "wav" => ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Wav, + "mp3" => ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Mp3, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs new file mode 100644 index 0000000000..da61347e2c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudioType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the content part. Always `input_audio`. 
+ /// + [global::System.Runtime.Serialization.DataContract] + public enum ChatCompletionRequestMessageContentPartAudioType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio")] + InputAudio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionRequestMessageContentPartAudioTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ChatCompletionRequestMessageContentPartAudioType value) + { + return value switch + { + ChatCompletionRequestMessageContentPartAudioType.InputAudio => "input_audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionRequestMessageContentPartAudioType? ToEnum(string value) + { + return value switch + { + "input_audio" => ChatCompletionRequestMessageContentPartAudioType.InputAudio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImage.g.verified.cs index ece83cf9e9..06563f3b97 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImage.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// + /// Learn about [image inputs](/docs/guides/vision). /// public sealed partial class ChatCompletionRequestMessageContentPartImage { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.verified.cs index 5e5418ed02..047322717c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.verified.cs @@ -16,7 +16,7 @@ public sealed partial class ChatCompletionRequestMessageContentPartImageImageUrl public string Url { get; set; } = default!; /// - /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
/// Default Value: auto ///
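// Sketch with assumptions flagged: the generated models in this snapshot typically
// pair a parameterless constructor with settable properties, and the OpenAI spec
// behind this diff defines the detail levels as auto/low/high. Leaving Detail unset
// keeps the documented default of "auto"; the URL passed in is caller-supplied.
public static class ImageDetailSketch
{
    public static global::G.ChatCompletionRequestMessageContentPartImageImageUrl ForUrl(string url)
    {
        return new global::G.ChatCompletionRequestMessageContentPartImageImageUrl
        {
            Url = url,
            // Detail is intentionally left null so the service applies "auto";
            // assign the generated Detail enum (declared below) for low/high fidelity.
        };
    }
}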
[global::Newtonsoft.Json.JsonProperty("detail")] @@ -35,7 +35,7 @@ public sealed partial class ChatCompletionRequestMessageContentPartImageImageUrl /// Either a URL of the image or the base64 encoded image data. /// /// - /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
/// Default Value: auto /// public ChatCompletionRequestMessageContentPartImageImageUrl( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.verified.cs index 46d7fc55a2..31ad212537 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
/// Default Value: auto ///
[global::System.Runtime.Serialization.DataContract] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartText.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartText.g.verified.cs index 9c5dbe71d3..0078fdf0ea 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartText.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestMessageContentPartText.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// public sealed partial class ChatCompletionRequestMessageContentPartText { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestSystemMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestSystemMessageContentPart.g.verified.cs index a9d8656e1e..a8fbe6d5ea 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestSystemMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestSystemMessageContentPart.g.verified.cs @@ -11,7 +11,7 @@ namespace G public readonly partial struct ChatCompletionRequestSystemMessageContentPart : global::System.IEquatable { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartText? Text { get; init; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestToolMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestToolMessageContentPart.g.verified.cs index 12c745b2c9..b9ee46522f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestToolMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestToolMessageContentPart.g.verified.cs @@ -11,7 +11,7 @@ namespace G public readonly partial struct ChatCompletionRequestToolMessageContentPart : global::System.IEquatable { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartText? Text { get; init; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestUserMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestUserMessageContentPart.g.verified.cs index b71a513153..ad9952ba45 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestUserMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestUserMessageContentPart.g.verified.cs @@ -16,7 +16,7 @@ namespace G public global::G.ChatCompletionRequestUserMessageContentPartDiscriminatorType? Type { get; } /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartText? 
Text { get; init; } @@ -51,7 +51,7 @@ public ChatCompletionRequestUserMessageContentPart(global::G.ChatCompletionReque } /// - /// + /// Learn about [image inputs](/docs/guides/vision). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartImage? ImageUrl { get; init; } @@ -85,25 +85,63 @@ public ChatCompletionRequestUserMessageContentPart(global::G.ChatCompletionReque ImageUrl = value; } + /// + /// Learn about [audio inputs](/docs/guides/audio). + /// +#if NET6_0_OR_GREATER + public global::G.ChatCompletionRequestMessageContentPartAudio? InputAudio { get; init; } +#else + public global::G.ChatCompletionRequestMessageContentPartAudio? InputAudio { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(InputAudio))] +#endif + public bool IsInputAudio => InputAudio != null; + + /// + /// + /// + public static implicit operator ChatCompletionRequestUserMessageContentPart(global::G.ChatCompletionRequestMessageContentPartAudio value) => new ChatCompletionRequestUserMessageContentPart(value); + + /// + /// + /// + public static implicit operator global::G.ChatCompletionRequestMessageContentPartAudio?(ChatCompletionRequestUserMessageContentPart @this) => @this.InputAudio; + + /// + /// + /// + public ChatCompletionRequestUserMessageContentPart(global::G.ChatCompletionRequestMessageContentPartAudio? value) + { + InputAudio = value; + } + /// /// /// public ChatCompletionRequestUserMessageContentPart( global::G.ChatCompletionRequestUserMessageContentPartDiscriminatorType? type, global::G.ChatCompletionRequestMessageContentPartText? text, - global::G.ChatCompletionRequestMessageContentPartImage? imageUrl + global::G.ChatCompletionRequestMessageContentPartImage? imageUrl, + global::G.ChatCompletionRequestMessageContentPartAudio? inputAudio ) { Type = type; Text = text; ImageUrl = imageUrl; + InputAudio = inputAudio; } /// /// /// public object? Object => + InputAudio as object ?? ImageUrl as object ?? Text as object ; @@ -113,7 +151,7 @@ Text as object ///
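// Illustrative sketch, assuming the generated `G` types in this diff: builds an
// `input_audio` content part from raw WAV bytes and uses the implicit conversion
// added in this hunk to treat it as a user-message content part; IsInputAudio
// (above) and the extended Validate/Match members (just below) then recognize it.
public static class InputAudioContentPartSketch
{
    public static bool Build(byte[] wavBytes)
    {
        var inputAudio = new global::G.ChatCompletionRequestMessageContentPartAudioInputAudio(
            data: global::System.Convert.ToBase64String(wavBytes),
            format: global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Wav);

        var audioPart = new global::G.ChatCompletionRequestMessageContentPartAudio(
            inputAudio: inputAudio,
            type: global::G.ChatCompletionRequestMessageContentPartAudioType.InputAudio);

        // Implicit conversion into the anyOf wrapper extended by this change.
        global::G.ChatCompletionRequestUserMessageContentPart part = audioPart;

        // The generated enum extension maps the format back to its wire value ("wav").
        string wireFormat = global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions
            .ToValueString(inputAudio.Format);

        return part.IsInputAudio && wireFormat == "wav";
    }
}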
public bool Validate() { - return IsText && !IsImageUrl || !IsText && IsImageUrl; + return IsText && !IsImageUrl && !IsInputAudio || !IsText && IsImageUrl && !IsInputAudio || !IsText && !IsImageUrl && IsInputAudio; } /// @@ -122,6 +160,7 @@ public bool Validate() public TResult? Match( global::System.Func? text = null, global::System.Func? imageUrl = null, + global::System.Func? inputAudio = null, bool validate = true) { if (validate) @@ -137,6 +176,10 @@ public bool Validate() { return imageUrl(ImageUrl!); } + else if (IsInputAudio && inputAudio != null) + { + return inputAudio(InputAudio!); + } return default(TResult); } @@ -147,6 +190,7 @@ public bool Validate() public void Match( global::System.Action? text = null, global::System.Action? imageUrl = null, + global::System.Action? inputAudio = null, bool validate = true) { if (validate) @@ -162,6 +206,10 @@ public void Match( { imageUrl?.Invoke(ImageUrl!); } + else if (IsInputAudio) + { + inputAudio?.Invoke(InputAudio!); + } } /// @@ -175,6 +223,8 @@ public override int GetHashCode() typeof(global::G.ChatCompletionRequestMessageContentPartText), ImageUrl, typeof(global::G.ChatCompletionRequestMessageContentPartImage), + InputAudio, + typeof(global::G.ChatCompletionRequestMessageContentPartAudio), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -192,7 +242,8 @@ public bool Equals(ChatCompletionRequestUserMessageContentPart other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(InputAudio, other.InputAudio) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestUserMessageContentPartDiscriminatorType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestUserMessageContentPartDiscriminatorType.g.verified.cs index 3d572a7947..3c5e176db6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestUserMessageContentPartDiscriminatorType.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionRequestUserMessageContentPartDiscriminatorType.g.verified.cs @@ -20,6 +20,11 @@ public enum ChatCompletionRequestUserMessageContentPartDiscriminatorType /// [global::System.Runtime.Serialization.EnumMember(Value="image_url")] ImageUrl, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio")] + InputAudio, } /// @@ -36,6 +41,7 @@ public static string ToValueString(this ChatCompletionRequestUserMessageContentP { ChatCompletionRequestUserMessageContentPartDiscriminatorType.Text => "text", ChatCompletionRequestUserMessageContentPartDiscriminatorType.ImageUrl => "image_url", + ChatCompletionRequestUserMessageContentPartDiscriminatorType.InputAudio => "input_audio", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } @@ -48,6 +54,7 @@ public static string ToValueString(this ChatCompletionRequestUserMessageContentP { "text" => ChatCompletionRequestUserMessageContentPartDiscriminatorType.Text, "image_url" => ChatCompletionRequestUserMessageContentPartDiscriminatorType.ImageUrl, + "input_audio" => 
ChatCompletionRequestUserMessageContentPartDiscriminatorType.InputAudio, _ => null, }; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessage.g.verified.cs index 7ba0883adf..8a092ac6c2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessage.g.verified.cs @@ -40,6 +40,13 @@ public sealed partial class ChatCompletionResponseMessage [global::System.Obsolete("This property marked as deprecated.")] public global::G.ChatCompletionResponseMessageFunctionCall? FunctionCall { get; set; } + /// + /// If the audio output modality is requested, this object contains data
+ /// about the audio response from the model. [Learn more](/docs/guides/audio). + ///
+ [global::Newtonsoft.Json.JsonProperty("audio")] + public global::G.ChatCompletionResponseMessageAudio? Audio { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -61,16 +68,22 @@ public sealed partial class ChatCompletionResponseMessage /// /// The role of the author of this message. /// + /// + /// If the audio output modality is requested, this object contains data
+ /// about the audio response from the model. [Learn more](/docs/guides/audio). + /// public ChatCompletionResponseMessage( string? content, string? refusal, global::System.Collections.Generic.IList? toolCalls, - global::G.ChatCompletionResponseMessageRole role) + global::G.ChatCompletionResponseMessageRole role, + global::G.ChatCompletionResponseMessageAudio? audio) { this.Content = content ?? throw new global::System.ArgumentNullException(nameof(content)); this.Refusal = refusal ?? throw new global::System.ArgumentNullException(nameof(refusal)); this.ToolCalls = toolCalls; this.Role = role; + this.Audio = audio; } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessageAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessageAudio.Json.g.verified.cs new file mode 100644 index 0000000000..638af1241b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessageAudio.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.ChatCompletionResponseMessageAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ChatCompletionResponseMessageAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ChatCompletionResponseMessageAudio? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessageAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessageAudio.g.verified.cs new file mode 100644 index 0000000000..fea10832d3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ChatCompletionResponseMessageAudio.g.verified.cs @@ -0,0 +1,83 @@ +//HintName: G.Models.ChatCompletionResponseMessageAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// If the audio output modality is requested, this object contains data
+ /// about the audio response from the model. [Learn more](/docs/guides/audio). + ///
+ public sealed partial class ChatCompletionResponseMessageAudio + { + /// + /// Unique identifier for this audio response. + /// + [global::Newtonsoft.Json.JsonProperty("id", Required = global::Newtonsoft.Json.Required.Always)] + public string Id { get; set; } = default!; + + /// + /// The Unix timestamp (in seconds) for when this audio response will
+ /// no longer be accessible on the server for use in multi-turn
+ /// conversations. + ///
+ [global::Newtonsoft.Json.JsonProperty("expires_at", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.DateTimeOffset ExpiresAt { get; set; } = default!; + + /// + /// Base64 encoded audio bytes generated by the model, in the format
+ /// specified in the request. + ///
+ [global::Newtonsoft.Json.JsonProperty("data", Required = global::Newtonsoft.Json.Required.Always)] + public string Data { get; set; } = default!; + + /// + /// Transcript of the audio generated by the model. + /// + [global::Newtonsoft.Json.JsonProperty("transcript", Required = global::Newtonsoft.Json.Required.Always)] + public string Transcript { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Unique identifier for this audio response. + /// + /// + /// The Unix timestamp (in seconds) for when this audio response will
+ /// no longer be accessible on the server for use in multi-turn
+ /// conversations. + /// + /// + /// Base64 encoded audio bytes generated by the model, in the format
+ /// specified in the request. + /// + /// + /// Transcript of the audio generated by the model. + /// + public ChatCompletionResponseMessageAudio( + string id, + global::System.DateTimeOffset expiresAt, + string data, + string transcript) + { + this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); + this.ExpiresAt = expiresAt; + this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.Transcript = transcript ?? throw new global::System.ArgumentNullException(nameof(transcript)); + } + + /// + /// Initializes a new instance of the class. + /// + public ChatCompletionResponseMessageAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsage.g.verified.cs index 28cab95dcc..ded2251c0e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsage.g.verified.cs @@ -27,6 +27,18 @@ public sealed partial class CompletionUsage [global::Newtonsoft.Json.JsonProperty("total_tokens", Required = global::Newtonsoft.Json.Required.Always)] public int TotalTokens { get; set; } = default!; + /// + /// Breakdown of tokens used in a completion. + /// + [global::Newtonsoft.Json.JsonProperty("completion_tokens_details")] + public global::G.CompletionUsageCompletionTokensDetails? CompletionTokensDetails { get; set; } + + /// + /// Breakdown of tokens used in the prompt. + /// + [global::Newtonsoft.Json.JsonProperty("prompt_tokens_details")] + public global::G.CompletionUsagePromptTokensDetails? PromptTokensDetails { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -45,14 +57,24 @@ public sealed partial class CompletionUsage /// /// Total number of tokens used in the request (prompt + completion). /// + /// + /// Breakdown of tokens used in a completion. + /// + /// + /// Breakdown of tokens used in the prompt. + /// public CompletionUsage( int completionTokens, int promptTokens, - int totalTokens) + int totalTokens, + global::G.CompletionUsageCompletionTokensDetails? completionTokensDetails, + global::G.CompletionUsagePromptTokensDetails? promptTokensDetails) { this.CompletionTokens = completionTokens; this.PromptTokens = promptTokens; this.TotalTokens = totalTokens; + this.CompletionTokensDetails = completionTokensDetails; + this.PromptTokensDetails = promptTokensDetails; } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsageCompletionTokensDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsageCompletionTokensDetails.Json.g.verified.cs new file mode 100644 index 0000000000..c0d9a47dbc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsageCompletionTokensDetails.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CompletionUsageCompletionTokensDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CompletionUsageCompletionTokensDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CompletionUsageCompletionTokensDetails? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsageCompletionTokensDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsageCompletionTokensDetails.g.verified.cs new file mode 100644 index 0000000000..478027e94f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsageCompletionTokensDetails.g.verified.cs @@ -0,0 +1,86 @@ +//HintName: G.Models.CompletionUsageCompletionTokensDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Breakdown of tokens used in a completion. + /// + public sealed partial class CompletionUsageCompletionTokensDetails + { + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that appeared in the completion. + ///
+ [global::Newtonsoft.Json.JsonProperty("accepted_prediction_tokens")] + public int? AcceptedPredictionTokens { get; set; } + + /// + /// Audio input tokens generated by the model. + /// + [global::Newtonsoft.Json.JsonProperty("audio_tokens")] + public int? AudioTokens { get; set; } + + /// + /// Tokens generated by the model for reasoning. + /// + [global::Newtonsoft.Json.JsonProperty("reasoning_tokens")] + public int? ReasoningTokens { get; set; } + + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that did not appear in the completion. However, like
+ /// reasoning tokens, these tokens are still counted in the total
+ /// completion tokens for purposes of billing, output, and context window
+ /// limits. + ///
+ [global::Newtonsoft.Json.JsonProperty("rejected_prediction_tokens")] + public int? RejectedPredictionTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that appeared in the completion. + /// + /// + /// Audio input tokens generated by the model. + /// + /// + /// Tokens generated by the model for reasoning. + /// + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that did not appear in the completion. However, like
+ /// reasoning tokens, these tokens are still counted in the total
+ /// completion tokens for purposes of billing, output, and context window
+ /// limits. + /// + public CompletionUsageCompletionTokensDetails( + int? acceptedPredictionTokens, + int? audioTokens, + int? reasoningTokens, + int? rejectedPredictionTokens) + { + this.AcceptedPredictionTokens = acceptedPredictionTokens; + this.AudioTokens = audioTokens; + this.ReasoningTokens = reasoningTokens; + this.RejectedPredictionTokens = rejectedPredictionTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public CompletionUsageCompletionTokensDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsagePromptTokensDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsagePromptTokensDetails.Json.g.verified.cs new file mode 100644 index 0000000000..434613c9db --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsagePromptTokensDetails.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CompletionUsagePromptTokensDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CompletionUsagePromptTokensDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CompletionUsagePromptTokensDetails? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsagePromptTokensDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsagePromptTokensDetails.g.verified.cs new file mode 100644 index 0000000000..a3d94bc63f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CompletionUsagePromptTokensDetails.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.CompletionUsagePromptTokensDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Breakdown of tokens used in the prompt. + /// + public sealed partial class CompletionUsagePromptTokensDetails + { + /// + /// Audio input tokens present in the prompt. + /// + [global::Newtonsoft.Json.JsonProperty("audio_tokens")] + public int? AudioTokens { get; set; } + + /// + /// Cached tokens present in the prompt. + /// + [global::Newtonsoft.Json.JsonProperty("cached_tokens")] + public int? CachedTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Audio input tokens present in the prompt. + /// + /// + /// Cached tokens present in the prompt. + /// + public CompletionUsagePromptTokensDetails( + int? audioTokens, + int? cachedTokens) + { + this.AudioTokens = audioTokens; + this.CachedTokens = cachedTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public CompletionUsagePromptTokensDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ContentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ContentItem.g.verified.cs index 048ae81008..164c4d4b13 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ContentItem.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ContentItem.g.verified.cs @@ -13,15 +13,15 @@ namespace G /// /// /// - public global::G.MessageObjectContentItemDiscriminatorType? Type { get; } + public global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? Type { get; } /// /// References an image [File](/docs/api-reference/files) in the content of a message. 
/// #if NET6_0_OR_GREATER - public global::G.MessageContentImageFileObject? ImageFile { get; init; } + public global::G.MessageDeltaContentImageFileObject? ImageFile { get; init; } #else - public global::G.MessageContentImageFileObject? ImageFile { get; } + public global::G.MessageDeltaContentImageFileObject? ImageFile { get; } #endif /// @@ -35,152 +35,152 @@ namespace G /// /// /// - public static implicit operator ContentItem(global::G.MessageContentImageFileObject value) => new ContentItem(value); + public static implicit operator ContentItem(global::G.MessageDeltaContentImageFileObject value) => new ContentItem(value); /// /// /// - public static implicit operator global::G.MessageContentImageFileObject?(ContentItem @this) => @this.ImageFile; + public static implicit operator global::G.MessageDeltaContentImageFileObject?(ContentItem @this) => @this.ImageFile; /// /// /// - public ContentItem(global::G.MessageContentImageFileObject? value) + public ContentItem(global::G.MessageDeltaContentImageFileObject? value) { ImageFile = value; } /// - /// References an image URL in the content of a message. + /// The text content that is part of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageContentImageUrlObject? ImageUrl { get; init; } + public global::G.MessageDeltaContentTextObject? Text { get; init; } #else - public global::G.MessageContentImageUrlObject? ImageUrl { get; } + public global::G.MessageDeltaContentTextObject? Text { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] #endif - public bool IsImageUrl => ImageUrl != null; + public bool IsText => Text != null; /// /// /// - public static implicit operator ContentItem(global::G.MessageContentImageUrlObject value) => new ContentItem(value); + public static implicit operator ContentItem(global::G.MessageDeltaContentTextObject value) => new ContentItem(value); /// /// /// - public static implicit operator global::G.MessageContentImageUrlObject?(ContentItem @this) => @this.ImageUrl; + public static implicit operator global::G.MessageDeltaContentTextObject?(ContentItem @this) => @this.Text; /// /// /// - public ContentItem(global::G.MessageContentImageUrlObject? value) + public ContentItem(global::G.MessageDeltaContentTextObject? value) { - ImageUrl = value; + Text = value; } /// - /// The text content that is part of a message. + /// The refusal content that is part of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageContentTextObject? Text { get; init; } + public global::G.MessageDeltaContentRefusalObject? Refusal { get; init; } #else - public global::G.MessageContentTextObject? Text { get; } + public global::G.MessageDeltaContentRefusalObject? 
Refusal { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Refusal))] #endif - public bool IsText => Text != null; + public bool IsRefusal => Refusal != null; /// /// /// - public static implicit operator ContentItem(global::G.MessageContentTextObject value) => new ContentItem(value); + public static implicit operator ContentItem(global::G.MessageDeltaContentRefusalObject value) => new ContentItem(value); /// /// /// - public static implicit operator global::G.MessageContentTextObject?(ContentItem @this) => @this.Text; + public static implicit operator global::G.MessageDeltaContentRefusalObject?(ContentItem @this) => @this.Refusal; /// /// /// - public ContentItem(global::G.MessageContentTextObject? value) + public ContentItem(global::G.MessageDeltaContentRefusalObject? value) { - Text = value; + Refusal = value; } /// - /// The refusal content generated by the assistant. + /// References an image URL in the content of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageContentRefusalObject? Refusal { get; init; } + public global::G.MessageDeltaContentImageUrlObject? ImageUrl { get; init; } #else - public global::G.MessageContentRefusalObject? Refusal { get; } + public global::G.MessageDeltaContentImageUrlObject? ImageUrl { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Refusal))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] #endif - public bool IsRefusal => Refusal != null; + public bool IsImageUrl => ImageUrl != null; /// /// /// - public static implicit operator ContentItem(global::G.MessageContentRefusalObject value) => new ContentItem(value); + public static implicit operator ContentItem(global::G.MessageDeltaContentImageUrlObject value) => new ContentItem(value); /// /// /// - public static implicit operator global::G.MessageContentRefusalObject?(ContentItem @this) => @this.Refusal; + public static implicit operator global::G.MessageDeltaContentImageUrlObject?(ContentItem @this) => @this.ImageUrl; /// /// /// - public ContentItem(global::G.MessageContentRefusalObject? value) + public ContentItem(global::G.MessageDeltaContentImageUrlObject? value) { - Refusal = value; + ImageUrl = value; } /// /// /// public ContentItem( - global::G.MessageObjectContentItemDiscriminatorType? type, - global::G.MessageContentImageFileObject? imageFile, - global::G.MessageContentImageUrlObject? imageUrl, - global::G.MessageContentTextObject? text, - global::G.MessageContentRefusalObject? refusal + global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? type, + global::G.MessageDeltaContentImageFileObject? imageFile, + global::G.MessageDeltaContentTextObject? text, + global::G.MessageDeltaContentRefusalObject? refusal, + global::G.MessageDeltaContentImageUrlObject? imageUrl ) { Type = type; ImageFile = imageFile; - ImageUrl = imageUrl; Text = text; Refusal = refusal; + ImageUrl = imageUrl; } /// /// /// public object? Object => + ImageUrl as object ?? Refusal as object ?? Text as object ?? - ImageUrl as object ?? 
ImageFile as object ; @@ -189,17 +189,17 @@ ImageFile as object /// public bool Validate() { - return IsImageFile && !IsImageUrl && !IsText && !IsRefusal || !IsImageFile && IsImageUrl && !IsText && !IsRefusal || !IsImageFile && !IsImageUrl && IsText && !IsRefusal || !IsImageFile && !IsImageUrl && !IsText && IsRefusal; + return IsImageFile && !IsText && !IsRefusal && !IsImageUrl || !IsImageFile && IsText && !IsRefusal && !IsImageUrl || !IsImageFile && !IsText && IsRefusal && !IsImageUrl || !IsImageFile && !IsText && !IsRefusal && IsImageUrl; } /// /// /// public TResult? Match( - global::System.Func? imageFile = null, - global::System.Func? imageUrl = null, - global::System.Func? text = null, - global::System.Func? refusal = null, + global::System.Func? imageFile = null, + global::System.Func? text = null, + global::System.Func? refusal = null, + global::System.Func? imageUrl = null, bool validate = true) { if (validate) @@ -211,10 +211,6 @@ public bool Validate() { return imageFile(ImageFile!); } - else if (IsImageUrl && imageUrl != null) - { - return imageUrl(ImageUrl!); - } else if (IsText && text != null) { return text(Text!); @@ -223,6 +219,10 @@ public bool Validate() { return refusal(Refusal!); } + else if (IsImageUrl && imageUrl != null) + { + return imageUrl(ImageUrl!); + } return default(TResult); } @@ -231,10 +231,10 @@ public bool Validate() /// ///
public void Match( - global::System.Action? imageFile = null, - global::System.Action? imageUrl = null, - global::System.Action? text = null, - global::System.Action? refusal = null, + global::System.Action? imageFile = null, + global::System.Action? text = null, + global::System.Action? refusal = null, + global::System.Action? imageUrl = null, bool validate = true) { if (validate) @@ -246,10 +246,6 @@ public void Match( { imageFile?.Invoke(ImageFile!); } - else if (IsImageUrl) - { - imageUrl?.Invoke(ImageUrl!); - } else if (IsText) { text?.Invoke(Text!); @@ -258,6 +254,10 @@ public void Match( { refusal?.Invoke(Refusal!); } + else if (IsImageUrl) + { + imageUrl?.Invoke(ImageUrl!); + } } /// @@ -268,13 +268,13 @@ public override int GetHashCode() var fields = new object?[] { ImageFile, - typeof(global::G.MessageContentImageFileObject), - ImageUrl, - typeof(global::G.MessageContentImageUrlObject), + typeof(global::G.MessageDeltaContentImageFileObject), Text, - typeof(global::G.MessageContentTextObject), + typeof(global::G.MessageDeltaContentTextObject), Refusal, - typeof(global::G.MessageContentRefusalObject), + typeof(global::G.MessageDeltaContentRefusalObject), + ImageUrl, + typeof(global::G.MessageDeltaContentImageUrlObject), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -291,10 +291,10 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ContentItem other) { return - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageFile, other.ImageFile) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Refusal, other.Refusal) + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageFile, other.ImageFile) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Refusal, other.Refusal) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ContentItem2.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ContentItem2.g.verified.cs index c472174993..079b89bd27 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ContentItem2.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ContentItem2.g.verified.cs @@ -13,15 +13,15 @@ namespace G /// /// /// - public global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? Type { get; } + public global::G.MessageObjectContentItemDiscriminatorType? Type { get; } /// /// References an image [File](/docs/api-reference/files) in the content of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageDeltaContentImageFileObject? ImageFile { get; init; } + public global::G.MessageContentImageFileObject? ImageFile { get; init; } #else - public global::G.MessageDeltaContentImageFileObject? ImageFile { get; } + public global::G.MessageContentImageFileObject? 
ImageFile { get; } #endif /// @@ -35,152 +35,152 @@ namespace G /// /// /// - public static implicit operator ContentItem2(global::G.MessageDeltaContentImageFileObject value) => new ContentItem2(value); + public static implicit operator ContentItem2(global::G.MessageContentImageFileObject value) => new ContentItem2(value); /// /// /// - public static implicit operator global::G.MessageDeltaContentImageFileObject?(ContentItem2 @this) => @this.ImageFile; + public static implicit operator global::G.MessageContentImageFileObject?(ContentItem2 @this) => @this.ImageFile; /// /// /// - public ContentItem2(global::G.MessageDeltaContentImageFileObject? value) + public ContentItem2(global::G.MessageContentImageFileObject? value) { ImageFile = value; } /// - /// The text content that is part of a message. + /// References an image URL in the content of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageDeltaContentTextObject? Text { get; init; } + public global::G.MessageContentImageUrlObject? ImageUrl { get; init; } #else - public global::G.MessageDeltaContentTextObject? Text { get; } + public global::G.MessageContentImageUrlObject? ImageUrl { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] #endif - public bool IsText => Text != null; + public bool IsImageUrl => ImageUrl != null; /// /// /// - public static implicit operator ContentItem2(global::G.MessageDeltaContentTextObject value) => new ContentItem2(value); + public static implicit operator ContentItem2(global::G.MessageContentImageUrlObject value) => new ContentItem2(value); /// /// /// - public static implicit operator global::G.MessageDeltaContentTextObject?(ContentItem2 @this) => @this.Text; + public static implicit operator global::G.MessageContentImageUrlObject?(ContentItem2 @this) => @this.ImageUrl; /// /// /// - public ContentItem2(global::G.MessageDeltaContentTextObject? value) + public ContentItem2(global::G.MessageContentImageUrlObject? value) { - Text = value; + ImageUrl = value; } /// - /// The refusal content that is part of a message. + /// The text content that is part of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageDeltaContentRefusalObject? Refusal { get; init; } + public global::G.MessageContentTextObject? Text { get; init; } #else - public global::G.MessageDeltaContentRefusalObject? Refusal { get; } + public global::G.MessageContentTextObject? Text { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Refusal))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] #endif - public bool IsRefusal => Refusal != null; + public bool IsText => Text != null; /// /// /// - public static implicit operator ContentItem2(global::G.MessageDeltaContentRefusalObject value) => new ContentItem2(value); + public static implicit operator ContentItem2(global::G.MessageContentTextObject value) => new ContentItem2(value); /// /// /// - public static implicit operator global::G.MessageDeltaContentRefusalObject?(ContentItem2 @this) => @this.Refusal; + public static implicit operator global::G.MessageContentTextObject?(ContentItem2 @this) => @this.Text; /// /// /// - public ContentItem2(global::G.MessageDeltaContentRefusalObject? value) + public ContentItem2(global::G.MessageContentTextObject? 
value) { - Refusal = value; + Text = value; } /// - /// References an image URL in the content of a message. + /// The refusal content generated by the assistant. /// #if NET6_0_OR_GREATER - public global::G.MessageDeltaContentImageUrlObject? ImageUrl { get; init; } + public global::G.MessageContentRefusalObject? Refusal { get; init; } #else - public global::G.MessageDeltaContentImageUrlObject? ImageUrl { get; } + public global::G.MessageContentRefusalObject? Refusal { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Refusal))] #endif - public bool IsImageUrl => ImageUrl != null; + public bool IsRefusal => Refusal != null; /// /// /// - public static implicit operator ContentItem2(global::G.MessageDeltaContentImageUrlObject value) => new ContentItem2(value); + public static implicit operator ContentItem2(global::G.MessageContentRefusalObject value) => new ContentItem2(value); /// /// /// - public static implicit operator global::G.MessageDeltaContentImageUrlObject?(ContentItem2 @this) => @this.ImageUrl; + public static implicit operator global::G.MessageContentRefusalObject?(ContentItem2 @this) => @this.Refusal; /// /// /// - public ContentItem2(global::G.MessageDeltaContentImageUrlObject? value) + public ContentItem2(global::G.MessageContentRefusalObject? value) { - ImageUrl = value; + Refusal = value; } /// /// /// public ContentItem2( - global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? type, - global::G.MessageDeltaContentImageFileObject? imageFile, - global::G.MessageDeltaContentTextObject? text, - global::G.MessageDeltaContentRefusalObject? refusal, - global::G.MessageDeltaContentImageUrlObject? imageUrl + global::G.MessageObjectContentItemDiscriminatorType? type, + global::G.MessageContentImageFileObject? imageFile, + global::G.MessageContentImageUrlObject? imageUrl, + global::G.MessageContentTextObject? text, + global::G.MessageContentRefusalObject? refusal ) { Type = type; ImageFile = imageFile; + ImageUrl = imageUrl; Text = text; Refusal = refusal; - ImageUrl = imageUrl; } /// /// /// public object? Object => - ImageUrl as object ?? Refusal as object ?? Text as object ?? + ImageUrl as object ?? ImageFile as object ; @@ -189,17 +189,17 @@ ImageFile as object /// public bool Validate() { - return IsImageFile && !IsText && !IsRefusal && !IsImageUrl || !IsImageFile && IsText && !IsRefusal && !IsImageUrl || !IsImageFile && !IsText && IsRefusal && !IsImageUrl || !IsImageFile && !IsText && !IsRefusal && IsImageUrl; + return IsImageFile && !IsImageUrl && !IsText && !IsRefusal || !IsImageFile && IsImageUrl && !IsText && !IsRefusal || !IsImageFile && !IsImageUrl && IsText && !IsRefusal || !IsImageFile && !IsImageUrl && !IsText && IsRefusal; } /// /// /// public TResult? Match( - global::System.Func? imageFile = null, - global::System.Func? text = null, - global::System.Func? refusal = null, - global::System.Func? imageUrl = null, + global::System.Func? imageFile = null, + global::System.Func? imageUrl = null, + global::System.Func? text = null, + global::System.Func? 
refusal = null, bool validate = true) { if (validate) @@ -211,6 +211,10 @@ public bool Validate() { return imageFile(ImageFile!); } + else if (IsImageUrl && imageUrl != null) + { + return imageUrl(ImageUrl!); + } else if (IsText && text != null) { return text(Text!); @@ -219,10 +223,6 @@ public bool Validate() { return refusal(Refusal!); } - else if (IsImageUrl && imageUrl != null) - { - return imageUrl(ImageUrl!); - } return default(TResult); } @@ -231,10 +231,10 @@ public bool Validate() /// /// public void Match( - global::System.Action? imageFile = null, - global::System.Action? text = null, - global::System.Action? refusal = null, - global::System.Action? imageUrl = null, + global::System.Action? imageFile = null, + global::System.Action? imageUrl = null, + global::System.Action? text = null, + global::System.Action? refusal = null, bool validate = true) { if (validate) @@ -246,6 +246,10 @@ public void Match( { imageFile?.Invoke(ImageFile!); } + else if (IsImageUrl) + { + imageUrl?.Invoke(ImageUrl!); + } else if (IsText) { text?.Invoke(Text!); @@ -254,10 +258,6 @@ public void Match( { refusal?.Invoke(Refusal!); } - else if (IsImageUrl) - { - imageUrl?.Invoke(ImageUrl!); - } } /// @@ -268,13 +268,13 @@ public override int GetHashCode() var fields = new object?[] { ImageFile, - typeof(global::G.MessageDeltaContentImageFileObject), + typeof(global::G.MessageContentImageFileObject), + ImageUrl, + typeof(global::G.MessageContentImageUrlObject), Text, - typeof(global::G.MessageDeltaContentTextObject), + typeof(global::G.MessageContentTextObject), Refusal, - typeof(global::G.MessageDeltaContentRefusalObject), - ImageUrl, - typeof(global::G.MessageDeltaContentImageUrlObject), + typeof(global::G.MessageContentRefusalObject), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -291,10 +291,10 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ContentItem2 other) { return - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageFile, other.ImageFile) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Refusal, other.Refusal) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageFile, other.ImageFile) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Refusal, other.Refusal) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResult.Json.g.verified.cs new file mode 100644 index 0000000000..71d52cc590 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CostsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CostsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CostsResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResult.g.verified.cs new file mode 100644 index 0000000000..246c631f23 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResult.g.verified.cs @@ -0,0 +1,74 @@ +//HintName: G.Models.CostsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated costs details of the specific time bucket. 
+ /// + public sealed partial class CostsResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.CostsResultObject Object { get; set; } + + /// + /// The monetary value in its associated currency. + /// + [global::Newtonsoft.Json.JsonProperty("amount")] + public global::G.CostsResultAmount? Amount { get; set; } + + /// + /// When `group_by=line_item`, this field provides the line item of the grouped costs result. + /// + [global::Newtonsoft.Json.JsonProperty("line_item")] + public string? LineItem { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped costs result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The monetary value in its associated currency. + /// + /// + /// When `group_by=line_item`, this field provides the line item of the grouped costs result. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped costs result. + /// + public CostsResult( + global::G.CostsResultObject @object, + global::G.CostsResultAmount? amount, + string? lineItem, + string? projectId) + { + this.Object = @object; + this.Amount = amount; + this.LineItem = lineItem; + this.ProjectId = projectId; + } + + /// + /// Initializes a new instance of the class. + /// + public CostsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultAmount.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultAmount.Json.g.verified.cs new file mode 100644 index 0000000000..e617429f4a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultAmount.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CostsResultAmount.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CostsResultAmount + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CostsResultAmount? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultAmount.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultAmount.g.verified.cs new file mode 100644 index 0000000000..843f5cf423 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultAmount.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.CostsResultAmount.g.cs + +#nullable enable + +namespace G +{ + /// + /// The monetary value in its associated currency. + /// + public sealed partial class CostsResultAmount + { + /// + /// The numeric value of the cost. + /// + [global::Newtonsoft.Json.JsonProperty("value")] + public double? Value { get; set; } + + /// + /// Lowercase ISO-4217 currency e.g. "usd" + /// + [global::Newtonsoft.Json.JsonProperty("currency")] + public string? Currency { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The numeric value of the cost. + /// + /// + /// Lowercase ISO-4217 currency e.g. "usd" + /// + public CostsResultAmount( + double? value, + string? currency) + { + this.Value = value; + this.Currency = currency; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public CostsResultAmount() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultObject.g.verified.cs new file mode 100644 index 0000000000..a2acd71ddb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CostsResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CostsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CostsResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.costs.result")] + OrganizationCostsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CostsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CostsResultObject value) + { + return value switch + { + CostsResultObject.OrganizationCostsResult => "organization.costs.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CostsResultObject? ToEnum(string value) + { + return value switch + { + "organization.costs.result" => CostsResultObject.OrganizationCostsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequest.g.verified.cs index 773d818825..37ce853bde 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequest.g.verified.cs @@ -12,7 +12,7 @@ namespace G public sealed partial class CreateAssistantRequest { /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o ///
/// gpt-4o @@ -38,7 +38,8 @@ public sealed partial class CreateAssistantRequest public string? Instructions { get; set; } /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] ///
[global::Newtonsoft.Json.JsonProperty("tools")] public global::System.Collections.Generic.IList? Tools { get; set; } @@ -50,13 +51,13 @@ public sealed partial class CreateAssistantRequest public global::G.CreateAssistantRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -65,7 +66,8 @@ public sealed partial class CreateAssistantRequest public double? Temperature { get; set; } /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
@@ -74,9 +76,9 @@ public sealed partial class CreateAssistantRequest public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
[global::Newtonsoft.Json.JsonProperty("response_format")] @@ -92,7 +94,7 @@ public sealed partial class CreateAssistantRequest /// Initializes a new instance of the class. ///
/// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o /// /// @@ -105,28 +107,30 @@ public sealed partial class CreateAssistantRequest /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// public CreateAssistantRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestMetadata.g.verified.cs index 9f4b31846c..488a6d875c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateAssistantRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestModel.g.verified.cs index 141cf6078d..f92cec7434 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestModel.g.verified.cs @@ -18,6 +18,11 @@ public enum CreateAssistantRequestModel /// /// /// + [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-2024-11-20")] + Gpt4o20241120, + /// + /// + /// [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-2024-08-06")] Gpt4o20240806, /// @@ -140,6 +145,7 @@ public static string ToValueString(this CreateAssistantRequestModel value) return value switch { CreateAssistantRequestModel.Gpt4o => "gpt-4o", + CreateAssistantRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateAssistantRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateAssistantRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", CreateAssistantRequestModel.Gpt4oMini => "gpt-4o-mini", @@ -173,6 +179,7 @@ public static string ToValueString(this CreateAssistantRequestModel value) return value switch { "gpt-4o" => CreateAssistantRequestModel.Gpt4o, + "gpt-4o-2024-11-20" => CreateAssistantRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateAssistantRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateAssistantRequestModel.Gpt4o20240513, "gpt-4o-mini" => CreateAssistantRequestModel.Gpt4oMini, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesCodeInterpreter.g.verified.cs 
b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesCodeInterpreter.g.verified.cs index f9a4bff41d..7b2733dd5e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class CreateAssistantRequestToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::Newtonsoft.Json.JsonProperty("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class CreateAssistantRequestToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// public CreateAssistantRequestToolResourcesCodeInterpreter( global::System.Collections.Generic.IList? fileIds) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStore.g.verified.cs index d3d8161259..edbef7279e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStore.g.verified.cs @@ -22,7 +22,7 @@ public sealed partial class CreateAssistantRequestToolResourcesFileSearchVectorS public global::G.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategy? ChunkingStrategy { get; set; } /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -43,7 +43,7 @@ public sealed partial class CreateAssistantRequestToolResourcesFileSearchVectorS /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public CreateAssistantRequestToolResourcesFileSearchVectorStore( global::System.Collections.Generic.IList? fileIds, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs index 1782f87f0f..fad7c36556 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. 
This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateBatchRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateBatchRequest.g.verified.cs index 0b7ee5d72e..a74142a9f8 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateBatchRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateBatchRequest.g.verified.cs @@ -12,7 +12,7 @@ public sealed partial class CreateBatchRequest /// /// The ID of an uploaded file that contains requests for the new batch.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
- /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. ///
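For context, one line of such a JSONL input file pairs a custom_id with the request to run; the field names below follow the batch request-input format linked above and are written from memory rather than taken from this diff.

using Newtonsoft.Json;

static class BatchInputLine
{
    // Sketch: build a single JSONL line for a /v1/chat/completions batch.
    public static string Build(string customId)
    {
        return JsonConvert.SerializeObject(new
        {
            custom_id = customId,
            method = "POST",
            url = "/v1/chat/completions",
            body = new
            {
                model = "gpt-4o-mini",
                messages = new[] { new { role = "user", content = "Hello!" } }
            }
        });
    }
}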
[global::Newtonsoft.Json.JsonProperty("input_file_id", Required = global::Newtonsoft.Json.Required.Always)] public string InputFileId { get; set; } = default!; @@ -47,7 +47,7 @@ public sealed partial class CreateBatchRequest /// /// The ID of an uploaded file that contains requests for the new batch.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
- /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. /// /// /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequest.g.verified.cs index 245eab6800..c0c4ff36ec 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequest.g.verified.cs @@ -12,22 +12,40 @@ namespace G public sealed partial class CreateChatCompletionRequest { /// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). ///
[global::Newtonsoft.Json.JsonProperty("messages", Required = global::Newtonsoft.Json.Required.Always)] public global::System.Collections.Generic.IList Messages { get; set; } = default!; /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o ///
/// gpt-4o [global::Newtonsoft.Json.JsonProperty("model", Required = global::Newtonsoft.Json.Required.Always)] public global::G.AnyOf Model { get; set; } = default!; + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + ///
+ [global::Newtonsoft.Json.JsonProperty("store")] + public bool? Store { get; set; } + + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + ///
+ [global::Newtonsoft.Json.JsonProperty("metadata")] + public global::System.Collections.Generic.Dictionary? Metadata { get; set; } + /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
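Sketch only: opting a request into storage and tagging it for dashboard filtering. The generic arguments of Metadata were stripped in this snapshot text, so Dictionary<string, string> is an assumption here.

using System.Collections.Generic;
using G;

static class DistillationTagging
{
    public static void Tag(CreateChatCompletionRequest request)
    {
        request.Store = true; // keep the completion for distillation/evals
        request.Metadata = new Dictionary<string, string>
        {
            ["experiment"] = "prompt-v2" // hypothetical tag used for dashboard filtering
        };
    }
}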
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::Newtonsoft.Json.JsonProperty("frequency_penalty")] @@ -54,12 +72,19 @@ public sealed partial class CreateChatCompletionRequest public int? TopLogprobs { get; set; } /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
- /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.
+ /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning). ///
[global::Newtonsoft.Json.JsonProperty("max_tokens")] + [global::System.Obsolete("This property marked as deprecated.")] public int? MaxTokens { get; set; } + /// + /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + /// + [global::Newtonsoft.Json.JsonProperty("max_completion_tokens")] + public int? MaxCompletionTokens { get; set; } + /// /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
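A small usage sketch of the change above: new callers should cap output via MaxCompletionTokens, since MaxTokens now carries [Obsolete] and is not compatible with the o1-series reasoning models.

using G;

static class OutputCaps
{
    public static void Cap(CreateChatCompletionRequest request)
    {
        // Upper bound that includes both visible output tokens and reasoning tokens.
        request.MaxCompletionTokens = 1024;
        // Setting request.MaxTokens would now raise an obsolete-member warning (CS0618).
    }
}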
/// Default Value: 1
@@ -69,18 +94,46 @@ public sealed partial class CreateChatCompletionRequest [global::Newtonsoft.Json.JsonProperty("n")] public int? N { get; set; } + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + ///
+ [global::Newtonsoft.Json.JsonProperty("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// Configuration for a [Predicted Output](/docs/guides/predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + ///
+ [global::Newtonsoft.Json.JsonProperty("prediction")] + public global::G.PredictionContent? Prediction { get; set; } + + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + ///
+ [global::Newtonsoft.Json.JsonProperty("audio")] + public global::G.CreateChatCompletionRequestAudio? Audio { get; set; } + /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
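Sketch: wiring the new audio output options together. CreateChatCompletionRequestAudio and its voice/format enums are added later in this diff; the element type of Modalities is not visible in this hunk, so it is only noted in a comment.

using G;

static class AudioOutput
{
    public static void RequestAudio(CreateChatCompletionRequest request)
    {
        // request.Modalities would be set to ["text", "audio"]; its element type is not shown in this hunk.
        request.Audio = new CreateChatCompletionRequestAudio(
            voice: CreateChatCompletionRequestAudioVoice.Ash,
            format: CreateChatCompletionRequestAudioFormat.Mp3);
    }
}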
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::Newtonsoft.Json.JsonProperty("presence_penalty")] public double? PresencePenalty { get; set; } /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
[global::Newtonsoft.Json.JsonProperty("response_format")] @@ -96,10 +149,12 @@ public sealed partial class CreateChatCompletionRequest /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto ///
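Sketch of the service_tier semantics above. Only the enum's declaration appears in these hunks, so the member names Auto and Default are assumptions inferred from the 'auto'/'default' wire values.

using G;

static class LatencyTier
{
    public static void PinToDefaultTier(CreateChatCompletionRequest request)
    {
        // Assumed member name; the corresponding wire value is "default".
        request.ServiceTier = CreateChatCompletionRequestServiceTier.Default;
    }
}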
[global::Newtonsoft.Json.JsonProperty("service_tier")] public global::G.CreateChatCompletionRequestServiceTier? ServiceTier { get; set; } @@ -161,13 +216,14 @@ public sealed partial class CreateChatCompletionRequest public global::G.ChatCompletionToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true ///
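Since the documented default is now true, a caller only needs to touch this when strictly sequential tool execution is required; a minimal sketch:

using G;

static class ToolCallOrdering
{
    public static void ForceSequentialToolCalls(CreateChatCompletionRequest request)
    {
        request.ParallelToolCalls = false; // default is true
    }
}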
[global::Newtonsoft.Json.JsonProperty("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -203,15 +259,27 @@ public sealed partial class CreateChatCompletionRequest /// Initializes a new instance of the class. ///
/// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + /// + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -225,24 +293,42 @@ public sealed partial class CreateChatCompletionRequest /// /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. /// - /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
- /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// + /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). /// /// /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
/// Default Value: 1
/// Example: 1 /// + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + /// + /// + /// Configuration for a [Predicted Output](/docs/guides/predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + /// + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// @@ -252,10 +338,12 @@ public sealed partial class CreateChatCompletionRequest /// /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto /// /// /// Up to 4 sequences where the API will stop generating further tokens. @@ -291,21 +379,27 @@ public sealed partial class CreateChatCompletionRequest /// `none` is the default when no tools are present. `auto` is the default if tools are present. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// public CreateChatCompletionRequest( global::System.Collections.Generic.IList messages, global::G.AnyOf model, + bool? store, + global::System.Collections.Generic.Dictionary? metadata, double? frequencyPenalty, global::System.Collections.Generic.Dictionary? logitBias, bool? logprobs, int? topLogprobs, - int? maxTokens, + int? maxCompletionTokens, int? n, + global::System.Collections.Generic.IList? modalities, + global::G.PredictionContent? prediction, + global::G.CreateChatCompletionRequestAudio? audio, double? presencePenalty, global::G.ResponseFormat? responseFormat, int? seed, @@ -322,12 +416,17 @@ public CreateChatCompletionRequest( { this.Messages = messages ?? throw new global::System.ArgumentNullException(nameof(messages)); this.Model = model; + this.Store = store; + this.Metadata = metadata; this.FrequencyPenalty = frequencyPenalty; this.LogitBias = logitBias; this.Logprobs = logprobs; this.TopLogprobs = topLogprobs; - this.MaxTokens = maxTokens; + this.MaxCompletionTokens = maxCompletionTokens; this.N = n; + this.Modalities = modalities; + this.Prediction = prediction; + this.Audio = audio; this.PresencePenalty = presencePenalty; this.ResponseFormat = responseFormat; this.Seed = seed; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudio.Json.g.verified.cs new file mode 100644 index 0000000000..4e26e43ad8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudio.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CreateChatCompletionRequestAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateChatCompletionRequestAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateChatCompletionRequestAudio? 
FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudio.g.verified.cs new file mode 100644 index 0000000000..caae9a874c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudio.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: G.Models.CreateChatCompletionRequestAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + ///
+ public sealed partial class CreateChatCompletionRequestAudio + { + /// + /// The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive). + /// + [global::Newtonsoft.Json.JsonProperty("voice", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.CreateChatCompletionRequestAudioVoice Voice { get; set; } = default!; + + /// + /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
+ /// `opus`, or `pcm16`. + ///
+ [global::Newtonsoft.Json.JsonProperty("format", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.CreateChatCompletionRequestAudioFormat Format { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive). + /// + /// + /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
+ /// `opus`, or `pcm16`. + /// + public CreateChatCompletionRequestAudio( + global::G.CreateChatCompletionRequestAudioVoice voice, + global::G.CreateChatCompletionRequestAudioFormat format) + { + this.Voice = voice; + this.Format = format; + } + + /// + /// Initializes a new instance of the class. + /// + public CreateChatCompletionRequestAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudioFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudioFormat.g.verified.cs new file mode 100644 index 0000000000..0c2a2c7753 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudioFormat.g.verified.cs @@ -0,0 +1,77 @@ +//HintName: G.Models.CreateChatCompletionRequestAudioFormat.g.cs + +#nullable enable + +namespace G +{ + /// + /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
+ /// `opus`, or `pcm16`. + ///
+ [global::System.Runtime.Serialization.DataContract] + public enum CreateChatCompletionRequestAudioFormat + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="wav")] + Wav, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="mp3")] + Mp3, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="flac")] + Flac, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="opus")] + Opus, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="pcm16")] + Pcm16, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateChatCompletionRequestAudioFormatExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateChatCompletionRequestAudioFormat value) + { + return value switch + { + CreateChatCompletionRequestAudioFormat.Wav => "wav", + CreateChatCompletionRequestAudioFormat.Mp3 => "mp3", + CreateChatCompletionRequestAudioFormat.Flac => "flac", + CreateChatCompletionRequestAudioFormat.Opus => "opus", + CreateChatCompletionRequestAudioFormat.Pcm16 => "pcm16", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateChatCompletionRequestAudioFormat? ToEnum(string value) + { + return value switch + { + "wav" => CreateChatCompletionRequestAudioFormat.Wav, + "mp3" => CreateChatCompletionRequestAudioFormat.Mp3, + "flac" => CreateChatCompletionRequestAudioFormat.Flac, + "opus" => CreateChatCompletionRequestAudioFormat.Opus, + "pcm16" => CreateChatCompletionRequestAudioFormat.Pcm16, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudioVoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudioVoice.g.verified.cs new file mode 100644 index 0000000000..c78936dd81 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestAudioVoice.g.verified.cs @@ -0,0 +1,97 @@ +//HintName: G.Models.CreateChatCompletionRequestAudioVoice.g.cs + +#nullable enable + +namespace G +{ + /// + /// The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive). + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateChatCompletionRequestAudioVoice + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="alloy")] + Alloy, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="ash")] + Ash, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="ballad")] + Ballad, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="coral")] + Coral, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="echo")] + Echo, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="sage")] + Sage, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="shimmer")] + Shimmer, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="verse")] + Verse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
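Usage sketch for the generated converters above; they map enum members to wire strings without reflection-based EnumMember lookups, and ToEnum returns null for unknown values rather than throwing.

using G;

static class AudioFormatRoundTrip
{
    public static void Demo()
    {
        string wire = CreateChatCompletionRequestAudioFormat.Pcm16.ToValueString(); // "pcm16"
        CreateChatCompletionRequestAudioFormat? parsed =
            CreateChatCompletionRequestAudioFormatExtensions.ToEnum("mp3");         // Mp3
        CreateChatCompletionRequestAudioFormat? unknown =
            CreateChatCompletionRequestAudioFormatExtensions.ToEnum("ogg");         // null, not an exception
    }
}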
+ /// + public static class CreateChatCompletionRequestAudioVoiceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateChatCompletionRequestAudioVoice value) + { + return value switch + { + CreateChatCompletionRequestAudioVoice.Alloy => "alloy", + CreateChatCompletionRequestAudioVoice.Ash => "ash", + CreateChatCompletionRequestAudioVoice.Ballad => "ballad", + CreateChatCompletionRequestAudioVoice.Coral => "coral", + CreateChatCompletionRequestAudioVoice.Echo => "echo", + CreateChatCompletionRequestAudioVoice.Sage => "sage", + CreateChatCompletionRequestAudioVoice.Shimmer => "shimmer", + CreateChatCompletionRequestAudioVoice.Verse => "verse", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateChatCompletionRequestAudioVoice? ToEnum(string value) + { + return value switch + { + "alloy" => CreateChatCompletionRequestAudioVoice.Alloy, + "ash" => CreateChatCompletionRequestAudioVoice.Ash, + "ballad" => CreateChatCompletionRequestAudioVoice.Ballad, + "coral" => CreateChatCompletionRequestAudioVoice.Coral, + "echo" => CreateChatCompletionRequestAudioVoice.Echo, + "sage" => CreateChatCompletionRequestAudioVoice.Sage, + "shimmer" => CreateChatCompletionRequestAudioVoice.Shimmer, + "verse" => CreateChatCompletionRequestAudioVoice.Verse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestMetadata.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestMetadata.Json.g.verified.cs new file mode 100644 index 0000000000..4dd0e3e969 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestMetadata.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CreateChatCompletionRequestMetadata.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateChatCompletionRequestMetadata + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateChatCompletionRequestMetadata? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestMetadata.g.verified.cs new file mode 100644 index 0000000000..48b61455df --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestMetadata.g.verified.cs @@ -0,0 +1,28 @@ +//HintName: G.Models.CreateChatCompletionRequestMetadata.g.cs + +#nullable enable + +namespace G +{ + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + ///
+ public sealed partial class CreateChatCompletionRequestMetadata + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + public CreateChatCompletionRequestMetadata( + ) + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestModel.g.verified.cs index c3a189a720..ed3c748203 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestModel.g.verified.cs @@ -10,6 +10,26 @@ namespace G [global::System.Runtime.Serialization.DataContract] public enum CreateChatCompletionRequestModel { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="o1-preview")] + O1Preview, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="o1-preview-2024-09-12")] + O1Preview20240912, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="o1-mini")] + O1Mini, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="o1-mini-2024-09-12")] + O1Mini20240912, /// /// /// @@ -18,6 +38,11 @@ public enum CreateChatCompletionRequestModel /// /// /// + [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-2024-11-20")] + Gpt4o20241120, + /// + /// + /// [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-2024-08-06")] Gpt4o20240806, /// @@ -28,6 +53,31 @@ public enum CreateChatCompletionRequestModel /// /// /// + [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-realtime-preview")] + Gpt4oRealtimePreview, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-realtime-preview-2024-10-01")] + Gpt4oRealtimePreview20241001, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-audio-preview")] + Gpt4oAudioPreview, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-audio-preview-2024-10-01")] + Gpt4oAudioPreview20241001, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="chatgpt-4o-latest")] + Chatgpt4oLatest, + /// + /// + /// [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-mini")] Gpt4oMini, /// @@ -144,9 +194,19 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) { return value switch { + CreateChatCompletionRequestModel.O1Preview => "o1-preview", + CreateChatCompletionRequestModel.O1Preview20240912 => "o1-preview-2024-09-12", + CreateChatCompletionRequestModel.O1Mini => "o1-mini", + CreateChatCompletionRequestModel.O1Mini20240912 => "o1-mini-2024-09-12", CreateChatCompletionRequestModel.Gpt4o => "gpt-4o", + CreateChatCompletionRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateChatCompletionRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateChatCompletionRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", + CreateChatCompletionRequestModel.Gpt4oRealtimePreview => "gpt-4o-realtime-preview", + CreateChatCompletionRequestModel.Gpt4oRealtimePreview20241001 => 
"gpt-4o-realtime-preview-2024-10-01", + CreateChatCompletionRequestModel.Gpt4oAudioPreview => "gpt-4o-audio-preview", + CreateChatCompletionRequestModel.Gpt4oAudioPreview20241001 => "gpt-4o-audio-preview-2024-10-01", + CreateChatCompletionRequestModel.Chatgpt4oLatest => "chatgpt-4o-latest", CreateChatCompletionRequestModel.Gpt4oMini => "gpt-4o-mini", CreateChatCompletionRequestModel.Gpt4oMini20240718 => "gpt-4o-mini-2024-07-18", CreateChatCompletionRequestModel.Gpt4Turbo => "gpt-4-turbo", @@ -178,9 +238,19 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) { return value switch { + "o1-preview" => CreateChatCompletionRequestModel.O1Preview, + "o1-preview-2024-09-12" => CreateChatCompletionRequestModel.O1Preview20240912, + "o1-mini" => CreateChatCompletionRequestModel.O1Mini, + "o1-mini-2024-09-12" => CreateChatCompletionRequestModel.O1Mini20240912, "gpt-4o" => CreateChatCompletionRequestModel.Gpt4o, + "gpt-4o-2024-11-20" => CreateChatCompletionRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateChatCompletionRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateChatCompletionRequestModel.Gpt4o20240513, + "gpt-4o-realtime-preview" => CreateChatCompletionRequestModel.Gpt4oRealtimePreview, + "gpt-4o-realtime-preview-2024-10-01" => CreateChatCompletionRequestModel.Gpt4oRealtimePreview20241001, + "gpt-4o-audio-preview" => CreateChatCompletionRequestModel.Gpt4oAudioPreview, + "gpt-4o-audio-preview-2024-10-01" => CreateChatCompletionRequestModel.Gpt4oAudioPreview20241001, + "chatgpt-4o-latest" => CreateChatCompletionRequestModel.Chatgpt4oLatest, "gpt-4o-mini" => CreateChatCompletionRequestModel.Gpt4oMini, "gpt-4o-mini-2024-07-18" => CreateChatCompletionRequestModel.Gpt4oMini20240718, "gpt-4-turbo" => CreateChatCompletionRequestModel.Gpt4Turbo, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestServiceTier.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestServiceTier.g.verified.cs index 8c5e00a788..65251ef4b5 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestServiceTier.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionRequestServiceTier.g.verified.cs @@ -6,10 +6,12 @@ namespace G { /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto
///
[global::System.Runtime.Serialization.DataContract] public enum CreateChatCompletionRequestServiceTier diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionResponseChoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionResponseChoice.g.verified.cs index 4851d1f26c..54b9fc8953 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionResponseChoice.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateChatCompletionResponseChoice.g.verified.cs @@ -33,8 +33,8 @@ public sealed partial class CreateChatCompletionResponseChoice /// /// Log probability information for the choice. /// - [global::Newtonsoft.Json.JsonProperty("logprobs")] - public global::G.CreateChatCompletionResponseChoiceLogprobs? Logprobs { get; set; } + [global::Newtonsoft.Json.JsonProperty("logprobs", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.CreateChatCompletionResponseChoiceLogprobs? Logprobs { get; set; } = default!; /// /// Additional properties that are not explicitly defined in the schema @@ -69,7 +69,7 @@ public CreateChatCompletionResponseChoice( this.FinishReason = finishReason; this.Index = index; this.Message = message ?? throw new global::System.ArgumentNullException(nameof(message)); - this.Logprobs = logprobs; + this.Logprobs = logprobs ?? throw new global::System.ArgumentNullException(nameof(logprobs)); } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateCompletionRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateCompletionRequest.g.verified.cs index 59e0b63820..76646b69d3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateCompletionRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateCompletionRequest.g.verified.cs @@ -12,7 +12,7 @@ namespace G public sealed partial class CreateCompletionRequest { /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::Newtonsoft.Json.JsonProperty("model", Required = global::Newtonsoft.Json.Required.Always)] public global::G.AnyOf Model { get; set; } = default!; @@ -43,7 +43,7 @@ public sealed partial class CreateCompletionRequest /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::Newtonsoft.Json.JsonProperty("frequency_penalty")] @@ -86,7 +86,7 @@ public sealed partial class CreateCompletionRequest /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::Newtonsoft.Json.JsonProperty("presence_penalty")] @@ -148,7 +148,7 @@ public sealed partial class CreateCompletionRequest public double? TopP { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -165,7 +165,7 @@ public sealed partial class CreateCompletionRequest /// Initializes a new instance of the class. ///
/// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
@@ -184,7 +184,7 @@ public sealed partial class CreateCompletionRequest /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -210,7 +210,7 @@ public sealed partial class CreateCompletionRequest /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -245,7 +245,7 @@ public sealed partial class CreateCompletionRequest /// Example: 1 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// public CreateCompletionRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateEmbeddingRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateEmbeddingRequest.g.verified.cs index 54ce541988..e14c9950d9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateEmbeddingRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateEmbeddingRequest.g.verified.cs @@ -20,7 +20,7 @@ public sealed partial class CreateEmbeddingRequest public global::G.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>> Input { get; set; } = default!; /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small ///
/// text-embedding-3-small @@ -43,7 +43,7 @@ public sealed partial class CreateEmbeddingRequest public int? Dimensions { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -64,7 +64,7 @@ public sealed partial class CreateEmbeddingRequest /// Example: The quick brown fox jumped over the lazy dog /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small /// /// @@ -76,7 +76,7 @@ public sealed partial class CreateEmbeddingRequest /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// public CreateEmbeddingRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateFineTuningJobRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateFineTuningJobRequest.g.verified.cs index 62f60f3070..9784b1304b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateFineTuningJobRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateFineTuningJobRequest.g.verified.cs @@ -13,7 +13,7 @@ public sealed partial class CreateFineTuningJobRequest { /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini ///
/// gpt-4o-mini @@ -39,7 +39,7 @@ public sealed partial class CreateFineTuningJobRequest public global::G.CreateFineTuningJobRequestHyperparameters? Hyperparameters { get; set; } /// - /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. ///
[global::Newtonsoft.Json.JsonProperty("suffix")] @@ -85,7 +85,7 @@ public sealed partial class CreateFineTuningJobRequest ///
/// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini /// /// @@ -100,7 +100,7 @@ public sealed partial class CreateFineTuningJobRequest /// The hyperparameters used for the fine-tuning job. /// /// - /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageEditRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageEditRequest.g.verified.cs index dba04e48fa..a440dbe31d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageEditRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageEditRequest.g.verified.cs @@ -80,7 +80,7 @@ public sealed partial class CreateImageEditRequest public global::G.CreateImageEditRequestResponseFormat? ResponseFormat { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -133,7 +133,7 @@ public sealed partial class CreateImageEditRequest /// Example: url /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// public CreateImageEditRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageRequest.g.verified.cs index ec63a6f53c..317771df9c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageRequest.g.verified.cs @@ -74,7 +74,7 @@ public sealed partial class CreateImageRequest public global::G.CreateImageRequestStyle? Style { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -125,7 +125,7 @@ public sealed partial class CreateImageRequest /// Example: vivid /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// public CreateImageRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageVariationRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageVariationRequest.g.verified.cs index 7dfe4c9bdc..8159284ca3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageVariationRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateImageVariationRequest.g.verified.cs @@ -60,7 +60,7 @@ public sealed partial class CreateImageVariationRequest public global::G.CreateImageVariationRequestSize? Size { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -103,7 +103,7 @@ public sealed partial class CreateImageVariationRequest /// Example: 1024x1024 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// public CreateImageVariationRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequest.g.verified.cs index fa8f2f62cb..e84c6770e4 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequest.g.verified.cs @@ -32,7 +32,7 @@ public sealed partial class CreateMessageRequest public global::System.Collections.Generic.IList? Attachments { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -56,7 +56,7 @@ public sealed partial class CreateMessageRequest /// A list of files attached to the message, and the tools they should be added to. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public CreateMessageRequest( global::G.CreateMessageRequestRole role, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequestAttachment.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequestAttachment.g.verified.cs index bdb14caea5..98797aeac9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequestAttachment.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequestAttachment.g.verified.cs @@ -19,7 +19,7 @@ public sealed partial class CreateMessageRequestAttachment /// The tools to add this file to. ///
[global::Newtonsoft.Json.JsonProperty("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -38,7 +38,7 @@ public sealed partial class CreateMessageRequestAttachment /// public CreateMessageRequestAttachment( string? fileId, - global::System.Collections.Generic.IList? tools) + global::System.Collections.Generic.IList? tools) { this.FileId = fileId; this.Tools = tools; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequestMetadata.g.verified.cs index 38770b490f..9aad86dccf 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateMessageRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateMessageRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequest.g.verified.cs index 2e1d096d9e..5f2ceaa187 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequest.g.verified.cs @@ -12,18 +12,20 @@ namespace G public sealed partial class CreateModerationRequest { /// - /// The input text to classify + /// Input (or inputs) to classify. Can be a single string, an array of strings, or
+ /// an array of multi-modal input objects similar to other models. ///
[global::Newtonsoft.Json.JsonProperty("input", Required = global::Newtonsoft.Json.Required.Always)] - public global::G.OneOf> Input { get; set; } = default!; + public global::G.OneOf, global::System.Collections.Generic.IList> Input { get; set; } = default!; /// - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
- /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- /// Default Value: text-moderation-latest
- /// Example: text-moderation-stable + /// The content moderation model you would like to use. Learn more in
+ /// [the moderation guide](/docs/guides/moderation), and learn about
+ /// available models [here](/docs/models#moderation).
+ /// Default Value: omni-moderation-latest
+ /// Example: omni-moderation-2024-09-26 ///
- /// text-moderation-stable + /// omni-moderation-2024-09-26 [global::Newtonsoft.Json.JsonProperty("model")] public global::G.AnyOf? Model { get; set; } @@ -37,16 +39,18 @@ public sealed partial class CreateModerationRequest /// Initializes a new instance of the class. ///
/// - /// The input text to classify + /// Input (or inputs) to classify. Can be a single string, an array of strings, or
+ /// an array of multi-modal input objects similar to other models. /// /// - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
- /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- /// Default Value: text-moderation-latest
- /// Example: text-moderation-stable + /// The content moderation model you would like to use. Learn more in
+ /// [the moderation guide](/docs/guides/moderation), and learn about
+ /// available models [here](/docs/models#moderation).
+ /// Default Value: omni-moderation-latest
+ /// Example: omni-moderation-2024-09-26 /// public CreateModerationRequest( - global::G.OneOf> input, + global::G.OneOf, global::System.Collections.Generic.IList> input, global::G.AnyOf? model) { this.Input = input; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.Json.g.verified.cs new file mode 100644 index 0000000000..e314093fdd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationRequestInputVariant3ItemDiscriminator + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationRequestInputVariant3ItemDiscriminator? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.g.verified.cs new file mode 100644 index 0000000000..f5ef5fbe1a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.g.verified.cs @@ -0,0 +1,41 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class CreateModerationRequestInputVariant3ItemDiscriminator + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + public CreateModerationRequestInputVariant3ItemDiscriminator( + global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? type) + { + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public CreateModerationRequestInputVariant3ItemDiscriminator() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs new file mode 100644 index 0000000000..3fb10b388a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationRequestInputVariant3ItemDiscriminatorType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image_url")] + ImageUrl, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationRequestInputVariant3ItemDiscriminatorTypeExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this CreateModerationRequestInputVariant3ItemDiscriminatorType value) + { + return value switch + { + CreateModerationRequestInputVariant3ItemDiscriminatorType.ImageUrl => "image_url", + CreateModerationRequestInputVariant3ItemDiscriminatorType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationRequestInputVariant3ItemDiscriminatorType? ToEnum(string value) + { + return value switch + { + "image_url" => CreateModerationRequestInputVariant3ItemDiscriminatorType.ImageUrl, + "text" => CreateModerationRequestInputVariant3ItemDiscriminatorType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.Json.g.verified.cs new file mode 100644 index 0000000000..c9f6cd51cc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationRequestInputVariant3ItemVariant1 + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationRequestInputVariant3ItemVariant1? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.g.verified.cs new file mode 100644 index 0000000000..93f460d023 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1.g.cs + +#nullable enable + +namespace G +{ + /// + /// An object describing an image to classify. + /// + public sealed partial class CreateModerationRequestInputVariant3ItemVariant1 + { + /// + /// Always `image_url`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.CreateModerationRequestInputVariant3ItemVariant1Type Type { get; set; } + + /// + /// Contains either an image URL or a data URL for a base64 encoded image. + /// + [global::Newtonsoft.Json.JsonProperty("image_url", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl ImageUrl { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Always `image_url`. + /// + /// + /// Contains either an image URL or a data URL for a base64 encoded image. + /// + public CreateModerationRequestInputVariant3ItemVariant1( + global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl imageUrl, + global::G.CreateModerationRequestInputVariant3ItemVariant1Type type) + { + this.ImageUrl = imageUrl ?? throw new global::System.ArgumentNullException(nameof(imageUrl)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public CreateModerationRequestInputVariant3ItemVariant1() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.Json.g.verified.cs new file mode 100644 index 0000000000..e6ce6e8672 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationRequestInputVariant3ItemVariant1ImageUrl + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.g.verified.cs new file mode 100644 index 0000000000..70e1cadd84 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.g.cs + +#nullable enable + +namespace G +{ + /// + /// Contains either an image URL or a data URL for a base64 encoded image. + /// + public sealed partial class CreateModerationRequestInputVariant3ItemVariant1ImageUrl + { + /// + /// Either a URL of the image or the base64 encoded image data.
+ /// Example: https://example.com/image.jpg + ///
+ /// https://example.com/image.jpg + [global::Newtonsoft.Json.JsonProperty("url", Required = global::Newtonsoft.Json.Required.Always)] + public string Url { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Either a URL of the image or the base64 encoded image data.
+ /// Example: https://example.com/image.jpg + /// + public CreateModerationRequestInputVariant3ItemVariant1ImageUrl( + string url) + { + this.Url = url ?? throw new global::System.ArgumentNullException(nameof(url)); + } + + /// + /// Initializes a new instance of the class. + /// + public CreateModerationRequestInputVariant3ItemVariant1ImageUrl() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs new file mode 100644 index 0000000000..4bc6667434 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1Type.g.cs + +#nullable enable + +namespace G +{ + /// + /// Always `image_url`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationRequestInputVariant3ItemVariant1Type + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image_url")] + ImageUrl, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationRequestInputVariant3ItemVariant1TypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationRequestInputVariant3ItemVariant1Type value) + { + return value switch + { + CreateModerationRequestInputVariant3ItemVariant1Type.ImageUrl => "image_url", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationRequestInputVariant3ItemVariant1Type? ToEnum(string value) + { + return value switch + { + "image_url" => CreateModerationRequestInputVariant3ItemVariant1Type.ImageUrl, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.Json.g.verified.cs new file mode 100644 index 0000000000..5f7d1bb93d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant2.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationRequestInputVariant3ItemVariant2 + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationRequestInputVariant3ItemVariant2? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.g.verified.cs new file mode 100644 index 0000000000..7041a14f97 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant2.g.cs + +#nullable enable + +namespace G +{ + /// + /// An object describing text to classify. + /// + public sealed partial class CreateModerationRequestInputVariant3ItemVariant2 + { + /// + /// Always `text`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.CreateModerationRequestInputVariant3ItemVariant2Type Type { get; set; } + + /// + /// A string of text to classify.
+ /// Example: I want to kill them + ///
+ /// I want to kill them + [global::Newtonsoft.Json.JsonProperty("text", Required = global::Newtonsoft.Json.Required.Always)] + public string Text { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Always `text`. + /// + /// + /// A string of text to classify.
+ /// Example: I want to kill them + /// + public CreateModerationRequestInputVariant3ItemVariant2( + string text, + global::G.CreateModerationRequestInputVariant3ItemVariant2Type type) + { + this.Text = text ?? throw new global::System.ArgumentNullException(nameof(text)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public CreateModerationRequestInputVariant3ItemVariant2() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs new file mode 100644 index 0000000000..601b615e5d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant2Type.g.cs + +#nullable enable + +namespace G +{ + /// + /// Always `text`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationRequestInputVariant3ItemVariant2Type + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationRequestInputVariant3ItemVariant2TypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationRequestInputVariant3ItemVariant2Type value) + { + return value switch + { + CreateModerationRequestInputVariant3ItemVariant2Type.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationRequestInputVariant3ItemVariant2Type? 
ToEnum(string value) + { + return value switch + { + "text" => CreateModerationRequestInputVariant3ItemVariant2Type.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestModel.g.verified.cs index 9723d55ad9..a8aabff068 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationRequestModel.g.verified.cs @@ -10,6 +10,16 @@ namespace G [global::System.Runtime.Serialization.DataContract] public enum CreateModerationRequestModel { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="omni-moderation-latest")] + OmniModerationLatest, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="omni-moderation-2024-09-26")] + OmniModeration20240926, /// /// /// @@ -34,6 +44,8 @@ public static string ToValueString(this CreateModerationRequestModel value) { return value switch { + CreateModerationRequestModel.OmniModerationLatest => "omni-moderation-latest", + CreateModerationRequestModel.OmniModeration20240926 => "omni-moderation-2024-09-26", CreateModerationRequestModel.TextModerationLatest => "text-moderation-latest", CreateModerationRequestModel.TextModerationStable => "text-moderation-stable", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), @@ -46,6 +58,8 @@ public static string ToValueString(this CreateModerationRequestModel value) { return value switch { + "omni-moderation-latest" => CreateModerationRequestModel.OmniModerationLatest, + "omni-moderation-2024-09-26" => CreateModerationRequestModel.OmniModeration20240926, "text-moderation-latest" => CreateModerationRequestModel.TextModerationLatest, "text-moderation-stable" => CreateModerationRequestModel.TextModerationStable, _ => null, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResult.g.verified.cs index f7f55d434a..456ae033ac 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResult.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResult.g.verified.cs @@ -27,6 +27,12 @@ public sealed partial class CreateModerationResponseResult [global::Newtonsoft.Json.JsonProperty("category_scores", Required = global::Newtonsoft.Json.Required.Always)] public global::G.CreateModerationResponseResultCategoryScores CategoryScores { get; set; } = default!; + /// + /// A list of the categories along with the input type(s) that the score applies to. + /// + [global::Newtonsoft.Json.JsonProperty("category_applied_input_types", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.CreateModerationResponseResultCategoryAppliedInputTypes CategoryAppliedInputTypes { get; set; } = default!; + /// /// Additional properties that are not explicitly defined in the schema /// @@ -45,14 +51,19 @@ public sealed partial class CreateModerationResponseResult /// /// A list of the categories along with their scores as predicted by model. 
/// + /// + /// A list of the categories along with the input type(s) that the score applies to. + /// public CreateModerationResponseResult( bool flagged, global::G.CreateModerationResponseResultCategories categories, - global::G.CreateModerationResponseResultCategoryScores categoryScores) + global::G.CreateModerationResponseResultCategoryScores categoryScores, + global::G.CreateModerationResponseResultCategoryAppliedInputTypes categoryAppliedInputTypes) { this.Flagged = flagged; this.Categories = categories ?? throw new global::System.ArgumentNullException(nameof(categories)); this.CategoryScores = categoryScores ?? throw new global::System.ArgumentNullException(nameof(categoryScores)); + this.CategoryAppliedInputTypes = categoryAppliedInputTypes ?? throw new global::System.ArgumentNullException(nameof(categoryAppliedInputTypes)); } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategories.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategories.g.verified.cs index 4c6f5e442c..5b722543a3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategories.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategories.g.verified.cs @@ -33,6 +33,18 @@ public sealed partial class CreateModerationResponseResultCategories [global::Newtonsoft.Json.JsonProperty("harassment/threatening", Required = global::Newtonsoft.Json.Required.Always)] public bool HarassmentThreatening { get; set; } = default!; + /// + /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category. + /// + [global::Newtonsoft.Json.JsonProperty("illicit", Required = global::Newtonsoft.Json.Required.Always)] + public bool Illicit { get; set; } = default!; + + /// + /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon. + /// + [global::Newtonsoft.Json.JsonProperty("illicit/violent", Required = global::Newtonsoft.Json.Required.Always)] + public bool IllicitViolent { get; set; } = default!; + /// /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. /// @@ -96,6 +108,12 @@ public sealed partial class CreateModerationResponseResultCategories /// /// Harassment content that also includes violence or serious harm towards any target. /// + /// + /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category. + /// + /// + /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon. + /// /// /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. 
/// @@ -122,6 +140,8 @@ public CreateModerationResponseResultCategories( bool hateThreatening, bool harassment, bool harassmentThreatening, + bool illicit, + bool illicitViolent, bool selfHarm, bool selfHarmIntent, bool selfHarmInstructions, @@ -134,6 +154,8 @@ public CreateModerationResponseResultCategories( this.HateThreatening = hateThreatening; this.Harassment = harassment; this.HarassmentThreatening = harassmentThreatening; + this.Illicit = illicit; + this.IllicitViolent = illicitViolent; this.SelfHarm = selfHarm; this.SelfHarmIntent = selfHarmIntent; this.SelfHarmInstructions = selfHarmInstructions; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.Json.g.verified.cs new file mode 100644 index 0000000000..c64160de12 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationResponseResultCategoryAppliedInputTypes + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationResponseResultCategoryAppliedInputTypes? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.g.verified.cs new file mode 100644 index 0000000000..3da69a4978 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.g.verified.cs @@ -0,0 +1,175 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.g.cs + +#nullable enable + +namespace G +{ + /// + /// A list of the categories along with the input type(s) that the score applies to. + /// + public sealed partial class CreateModerationResponseResultCategoryAppliedInputTypes + { + /// + /// The applied input type(s) for the category 'hate'. + /// + [global::Newtonsoft.Json.JsonProperty("hate", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList Hate { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'hate/threatening'. + /// + [global::Newtonsoft.Json.JsonProperty("hate/threatening", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList HateThreatening { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'harassment'. + /// + [global::Newtonsoft.Json.JsonProperty("harassment", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList Harassment { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'harassment/threatening'. + /// + [global::Newtonsoft.Json.JsonProperty("harassment/threatening", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList HarassmentThreatening { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'illicit'. + /// + [global::Newtonsoft.Json.JsonProperty("illicit", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList Illicit { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'illicit/violent'. + /// + [global::Newtonsoft.Json.JsonProperty("illicit/violent", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList IllicitViolent { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'self-harm'. 
+ /// + [global::Newtonsoft.Json.JsonProperty("self-harm", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList SelfHarm { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'self-harm/intent'. + /// + [global::Newtonsoft.Json.JsonProperty("self-harm/intent", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList SelfHarmIntent { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'self-harm/instructions'. + /// + [global::Newtonsoft.Json.JsonProperty("self-harm/instructions", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList SelfHarmInstructions { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'sexual'. + /// + [global::Newtonsoft.Json.JsonProperty("sexual", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList Sexual { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'sexual/minors'. + /// + [global::Newtonsoft.Json.JsonProperty("sexual/minors", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList SexualMinors { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'violence'. + /// + [global::Newtonsoft.Json.JsonProperty("violence", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList Violence { get; set; } = default!; + + /// + /// The applied input type(s) for the category 'violence/graphic'. + /// + [global::Newtonsoft.Json.JsonProperty("violence/graphic", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList ViolenceGraphic { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The applied input type(s) for the category 'hate'. + /// + /// + /// The applied input type(s) for the category 'hate/threatening'. + /// + /// + /// The applied input type(s) for the category 'harassment'. + /// + /// + /// The applied input type(s) for the category 'harassment/threatening'. + /// + /// + /// The applied input type(s) for the category 'illicit'. + /// + /// + /// The applied input type(s) for the category 'illicit/violent'. + /// + /// + /// The applied input type(s) for the category 'self-harm'. + /// + /// + /// The applied input type(s) for the category 'self-harm/intent'. + /// + /// + /// The applied input type(s) for the category 'self-harm/instructions'. + /// + /// + /// The applied input type(s) for the category 'sexual'. + /// + /// + /// The applied input type(s) for the category 'sexual/minors'. + /// + /// + /// The applied input type(s) for the category 'violence'. + /// + /// + /// The applied input type(s) for the category 'violence/graphic'. 
+ /// + public CreateModerationResponseResultCategoryAppliedInputTypes( + global::System.Collections.Generic.IList hate, + global::System.Collections.Generic.IList hateThreatening, + global::System.Collections.Generic.IList harassment, + global::System.Collections.Generic.IList harassmentThreatening, + global::System.Collections.Generic.IList illicit, + global::System.Collections.Generic.IList illicitViolent, + global::System.Collections.Generic.IList selfHarm, + global::System.Collections.Generic.IList selfHarmIntent, + global::System.Collections.Generic.IList selfHarmInstructions, + global::System.Collections.Generic.IList sexual, + global::System.Collections.Generic.IList sexualMinors, + global::System.Collections.Generic.IList violence, + global::System.Collections.Generic.IList violenceGraphic) + { + this.Hate = hate ?? throw new global::System.ArgumentNullException(nameof(hate)); + this.HateThreatening = hateThreatening ?? throw new global::System.ArgumentNullException(nameof(hateThreatening)); + this.Harassment = harassment ?? throw new global::System.ArgumentNullException(nameof(harassment)); + this.HarassmentThreatening = harassmentThreatening ?? throw new global::System.ArgumentNullException(nameof(harassmentThreatening)); + this.Illicit = illicit ?? throw new global::System.ArgumentNullException(nameof(illicit)); + this.IllicitViolent = illicitViolent ?? throw new global::System.ArgumentNullException(nameof(illicitViolent)); + this.SelfHarm = selfHarm ?? throw new global::System.ArgumentNullException(nameof(selfHarm)); + this.SelfHarmIntent = selfHarmIntent ?? throw new global::System.ArgumentNullException(nameof(selfHarmIntent)); + this.SelfHarmInstructions = selfHarmInstructions ?? throw new global::System.ArgumentNullException(nameof(selfHarmInstructions)); + this.Sexual = sexual ?? throw new global::System.ArgumentNullException(nameof(sexual)); + this.SexualMinors = sexualMinors ?? throw new global::System.ArgumentNullException(nameof(sexualMinors)); + this.Violence = violence ?? throw new global::System.ArgumentNullException(nameof(violence)); + this.ViolenceGraphic = violenceGraphic ?? throw new global::System.ArgumentNullException(nameof(violenceGraphic)); + } + + /// + /// Initializes a new instance of the class. + /// + public CreateModerationResponseResultCategoryAppliedInputTypes() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs new file mode 100644 index 0000000000..a05cf5c9ef --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
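Together with the `ToJson`/`FromJson` helpers earlier in this diff, the class above can be populated and round-tripped through Newtonsoft.Json. A sketch under two assumptions: the list element types are the per-category `...Item` enums added alongside, and the local `One` helper exists only for brevity here:

    using System;
    using System.Collections.Generic;
    using G;

    static class AppliedInputTypesRoundTrip
    {
        // Illustrative helper, not part of the generated SDK.
        static List<T> One<T>(T value) => new List<T> { value };

        static void Main()
        {
            var applied = new CreateModerationResponseResultCategoryAppliedInputTypes(
                hate: One(CreateModerationResponseResultCategoryAppliedInputTypesHateItem.Text),
                hateThreatening: One(CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.Text),
                harassment: One(CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.Text),
                harassmentThreatening: One(CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.Text),
                illicit: One(CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.Text),
                illicitViolent: One(CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.Text),
                selfHarm: One(CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Image),
                selfHarmIntent: One(CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Image),
                selfHarmInstructions: One(CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Text),
                sexual: One(CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Image),
                sexualMinors: One(CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.Text),
                violence: One(CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Image),
                violenceGraphic: One(CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Text));

            // Serialize with the generated Newtonsoft helper, then parse the text back.
            string json = applied.ToJson();
            var roundTripped = CreateModerationResponseResultCategoryAppliedInputTypes.FromJson(json);
            Console.WriteLine(roundTripped?.Illicit.Count); // 1
        }
    }

Note that the constructor takes all thirteen lists because every `category_applied_input_types` key is marked `Required.Always` in the generated contract.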
+ /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs new file mode 100644 index 0000000000..b1e8c77d59 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? 
ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs new file mode 100644 index 0000000000..14a56fa877 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesHateItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesHateItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesHateItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesHateItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesHateItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesHateItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs new file mode 100644 index 0000000000..691619b02a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs new file mode 100644 index 0000000000..2b18326da3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? 
ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs new file mode 100644 index 0000000000..dc5ff2ceff --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs new file mode 100644 index 0000000000..960ac285e7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image")] + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs new file mode 100644 index 0000000000..e88114e4ea --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image")] + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? 
ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs new file mode 100644 index 0000000000..4c2db88bf1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image")] + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs new file mode 100644 index 0000000000..985a59c2e2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesSexualItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image")] + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
+ /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSexualItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSexualItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs new file mode 100644 index 0000000000..6a54028fcd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? 
ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs new file mode 100644 index 0000000000..c649433807 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image")] + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs new file mode 100644 index 0000000000..1d5531d888 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image")] + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
+ /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryScores.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryScores.g.verified.cs index 051d7fcfa2..4133f7586f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryScores.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateModerationResponseResultCategoryScores.g.verified.cs @@ -33,6 +33,18 @@ public sealed partial class CreateModerationResponseResultCategoryScores [global::Newtonsoft.Json.JsonProperty("harassment/threatening", Required = global::Newtonsoft.Json.Required.Always)] public double HarassmentThreatening { get; set; } = default!; + /// + /// The score for the category 'illicit'. + /// + [global::Newtonsoft.Json.JsonProperty("illicit", Required = global::Newtonsoft.Json.Required.Always)] + public double Illicit { get; set; } = default!; + + /// + /// The score for the category 'illicit/violent'. + /// + [global::Newtonsoft.Json.JsonProperty("illicit/violent", Required = global::Newtonsoft.Json.Required.Always)] + public double IllicitViolent { get; set; } = default!; + /// /// The score for the category 'self-harm'. /// @@ -96,6 +108,12 @@ public sealed partial class CreateModerationResponseResultCategoryScores /// /// The score for the category 'harassment/threatening'. /// + /// + /// The score for the category 'illicit'. + /// + /// + /// The score for the category 'illicit/violent'. + /// /// /// The score for the category 'self-harm'. 
/// @@ -122,6 +140,8 @@ public CreateModerationResponseResultCategoryScores( double hateThreatening, double harassment, double harassmentThreatening, + double illicit, + double illicitViolent, double selfHarm, double selfHarmIntent, double selfHarmInstructions, @@ -134,6 +154,8 @@ public CreateModerationResponseResultCategoryScores( this.HateThreatening = hateThreatening; this.Harassment = harassment; this.HarassmentThreatening = harassmentThreatening; + this.Illicit = illicit; + this.IllicitViolent = illicitViolent; this.SelfHarm = selfHarm; this.SelfHarmIntent = selfHarmIntent; this.SelfHarmInstructions = selfHarmInstructions; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunIncludeItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunIncludeItem.g.verified.cs new file mode 100644 index 0000000000..d4a9bcbb49 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunIncludeItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateRunIncludeItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum CreateRunIncludeItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="step_details.tool_calls[*].file_search.results[*].content")] + StepDetailsToolCallsAnyFileSearchResultsAnyContent, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateRunIncludeItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateRunIncludeItem value) + { + return value switch + { + CreateRunIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent => "step_details.tool_calls[*].file_search.results[*].content", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateRunIncludeItem? ToEnum(string value) + { + return value switch + { + "step_details.tool_calls[*].file_search.results[*].content" => CreateRunIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequest.g.verified.cs index dd77cc85e3..c5aae57eb9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequest.g.verified.cs @@ -47,16 +47,16 @@ public sealed partial class CreateRunRequest /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. /// [global::Newtonsoft.Json.JsonProperty("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } /// - /// empty
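The `CreateRunIncludeItem` enum introduced above corresponds, by its wire value, to the `include[]` option on run creation. Converting it is a one-liner with the generated extensions (same `G` namespace assumption as before):

    using System;
    using G;

    static class RunIncludeDemo
    {
        static void Main()
        {
            // Wire value to send as the include[] query parameter when creating a run.
            string include = CreateRunIncludeItem
                .StepDetailsToolCallsAnyFileSearchResultsAnyContent
                .ToValueString();

            Console.WriteLine(include);
            // step_details.tool_calls[*].file_search.results[*].content
        }
    }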
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -65,7 +65,8 @@ public sealed partial class CreateRunRequest
         public double? Temperature { get; set; }
 
         /// <summary>
-        /// empty<br/>
+        /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br/>
+        /// We generally recommend altering this or temperature but not both.<br/>
         /// Default Value: 1<br/>
         /// Example: 1
         /// </summary>
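The sampling documentation above maps onto nullable properties of the generated request type. A minimal sketch; the `TopP` property name is inferred from the `top_p` doc comment, since only `Temperature` is visible in this hunk:

    using G;

    static class RunSamplingDefaults
    {
        // Both settings default to 1; the docs recommend tuning one or the other, not both.
        static void UseFocusedSampling(CreateRunRequest request)
        {
            request.Temperature = 0.2; // lower temperature => more focused, deterministic output
            request.TopP = null;       // leave nucleus sampling at its default
        }
    }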
@@ -108,15 +109,16 @@ public sealed partial class CreateRunRequest public global::G.AssistantsApiToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true ///
[global::Newtonsoft.Json.JsonProperty("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
[global::Newtonsoft.Json.JsonProperty("response_format")] @@ -151,15 +153,16 @@ public sealed partial class CreateRunRequest /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -183,12 +186,13 @@ public sealed partial class CreateRunRequest /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// public CreateRunRequest( @@ -197,7 +201,7 @@ public CreateRunRequest( string? instructions, string? additionalInstructions, global::System.Collections.Generic.IList? additionalMessages, - global::System.Collections.Generic.IList? tools, + global::System.Collections.Generic.IList? tools, object? metadata, double? temperature, double? topP, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequestMetadata.g.verified.cs index 509cf8f735..de73830f2a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class CreateRunRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequestModel.g.verified.cs index 269cb91cd1..df3d36e1f9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateRunRequestModel.g.verified.cs @@ -18,6 +18,11 @@ public enum CreateRunRequestModel /// /// /// + [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-2024-11-20")] + Gpt4o20241120, + /// + /// + /// [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-2024-08-06")] Gpt4o20240806, /// @@ -140,6 +145,7 @@ public static string ToValueString(this CreateRunRequestModel value) return value switch { CreateRunRequestModel.Gpt4o => "gpt-4o", + CreateRunRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateRunRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateRunRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", CreateRunRequestModel.Gpt4oMini => "gpt-4o-mini", @@ -173,6 +179,7 @@ public static string ToValueString(this CreateRunRequestModel value) return value switch { "gpt-4o" => CreateRunRequestModel.Gpt4o, + "gpt-4o-2024-11-20" => CreateRunRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateRunRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateRunRequestModel.Gpt4o20240513, "gpt-4o-mini" => CreateRunRequestModel.Gpt4oMini, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateSpeechRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateSpeechRequest.g.verified.cs index d0c06060ee..3ce1071224 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateSpeechRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateSpeechRequest.g.verified.cs @@ -12,7 +12,7 @@ namespace G public sealed partial class CreateSpeechRequest { /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// [global::Newtonsoft.Json.JsonProperty("model", Required = global::Newtonsoft.Json.Required.Always)] public global::G.AnyOf Model { get; set; } = default!; @@ -24,7 +24,7 @@ public sealed partial class CreateSpeechRequest public string Input { get; set; } = default!; /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// [global::Newtonsoft.Json.JsonProperty("voice", Required = global::Newtonsoft.Json.Required.Always)] public global::G.CreateSpeechRequestVoice Voice { get; set; } = default!; @@ -53,13 +53,13 @@ public sealed partial class CreateSpeechRequest /// Initializes a new instance of the class. 
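`CreateRunRequestModel` picks up the dated `gpt-4o-2024-11-20` alias shown above in the same way. A sketch assuming the usual `CreateRunRequestModelExtensions` companion class:

    using System;
    using G;

    static class RunModelAliasDemo
    {
        static void Main()
        {
            // New dated gpt-4o snapshot added alongside the existing aliases.
            CreateRunRequestModel? model = CreateRunRequestModelExtensions.ToEnum("gpt-4o-2024-11-20");
            Console.WriteLine(model?.ToValueString()); // gpt-4o-2024-11-20
        }
    }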
/// /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// /// /// The text to generate audio for. The maximum length is 4096 characters. /// /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// /// /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateSpeechRequestVoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateSpeechRequestVoice.g.verified.cs index f8dc60f20e..f7c4e0bf46 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateSpeechRequestVoice.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateSpeechRequestVoice.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// [global::System.Runtime.Serialization.DataContract] public enum CreateSpeechRequestVoice diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequest.g.verified.cs index d8b1442253..b3dd6f571e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequest.g.verified.cs @@ -41,7 +41,7 @@ public sealed partial class CreateThreadAndRunRequest /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. ///
[global::Newtonsoft.Json.JsonProperty("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @@ -50,13 +50,13 @@ public sealed partial class CreateThreadAndRunRequest public global::G.CreateThreadAndRunRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -65,7 +65,8 @@ public sealed partial class CreateThreadAndRunRequest public double? Temperature { get; set; } /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
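The restored descriptions above cover the run-level sampling settings on CreateThreadAndRunRequest. A minimal sketch of how they surface on the generated model, assuming the parameterless constructor these generated classes typically expose and that the nucleus-sampling property is named TopP (only Temperature is visible in this hunk):

    // Prefer tuning either temperature or top_p, not both, per the doc comments above.
    var run = new G.CreateThreadAndRunRequest
    {
        Temperature = 0.2,   // lower value => more focused, more deterministic output
        // TopP = 0.1,       // assumed property name; use instead of Temperature if preferred
    };
    // Required members such as the assistant id are omitted from this sketch.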
@@ -108,15 +109,16 @@ public sealed partial class CreateThreadAndRunRequest public global::G.AssistantsApiToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true ///
[global::Newtonsoft.Json.JsonProperty("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
[global::Newtonsoft.Json.JsonProperty("response_format")] @@ -149,15 +151,16 @@ public sealed partial class CreateThreadAndRunRequest /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -181,12 +184,13 @@ public sealed partial class CreateThreadAndRunRequest /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// public CreateThreadAndRunRequest( @@ -194,7 +198,7 @@ public CreateThreadAndRunRequest( global::G.CreateThreadRequest? thread, global::G.AnyOf? model, string? instructions, - global::System.Collections.Generic.IList? tools, + global::System.Collections.Generic.IList? tools, global::G.CreateThreadAndRunRequestToolResources? toolResources, object? metadata, double? temperature, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestMetadata.g.verified.cs index 47ccc2aa31..670bf455aa 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
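The response_format wording above separates JSON mode from Structured Outputs. A small sketch of the two payload shapes it describes, built with Newtonsoft.Json (the serializer these snapshots target); the keys inside json_schema follow the Structured Outputs guide, and the schema body itself is a made-up illustration:

    using Newtonsoft.Json.Linq;

    // JSON mode: output is only constrained to be valid JSON, so the prompt
    // must still explicitly ask the model to produce JSON.
    var jsonMode = new JObject { ["type"] = "json_object" };

    // Structured Outputs: output must match the supplied JSON schema.
    var structuredOutputs = new JObject
    {
        ["type"] = "json_schema",
        ["json_schema"] = new JObject
        {
            ["name"] = "answer_schema",   // hypothetical schema name
            ["schema"] = JObject.Parse(
                @"{ ""type"": ""object"",
                    ""properties"": { ""answer"": { ""type"": ""string"" } },
                    ""required"": [""answer""] }")
        }
    };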
/// public sealed partial class CreateThreadAndRunRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestModel.g.verified.cs index fcd5d3948a..d4411d5c25 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestModel.g.verified.cs @@ -18,6 +18,11 @@ public enum CreateThreadAndRunRequestModel /// /// /// + [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-2024-11-20")] + Gpt4o20241120, + /// + /// + /// [global::System.Runtime.Serialization.EnumMember(Value="gpt-4o-2024-08-06")] Gpt4o20240806, /// @@ -140,6 +145,7 @@ public static string ToValueString(this CreateThreadAndRunRequestModel value) return value switch { CreateThreadAndRunRequestModel.Gpt4o => "gpt-4o", + CreateThreadAndRunRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateThreadAndRunRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateThreadAndRunRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", CreateThreadAndRunRequestModel.Gpt4oMini => "gpt-4o-mini", @@ -173,6 +179,7 @@ public static string ToValueString(this CreateThreadAndRunRequestModel value) return value switch { "gpt-4o" => CreateThreadAndRunRequestModel.Gpt4o, + "gpt-4o-2024-11-20" => CreateThreadAndRunRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateThreadAndRunRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateThreadAndRunRequestModel.Gpt4o20240513, "gpt-4o-mini" => CreateThreadAndRunRequestModel.Gpt4oMini, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestToolResourcesCodeInterpreter.g.verified.cs index 5933d3aff4..26c7ce0d93 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadAndRunRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class CreateThreadAndRunRequestToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::Newtonsoft.Json.JsonProperty("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class CreateThreadAndRunRequestToolResourcesCodeInterprete /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// public CreateThreadAndRunRequestToolResourcesCodeInterpreter( global::System.Collections.Generic.IList? fileIds) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequest.g.verified.cs index 0ed6648a15..f59ee929a5 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequest.g.verified.cs @@ -22,7 +22,7 @@ public sealed partial class CreateThreadRequest public global::G.CreateThreadRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -43,7 +43,7 @@ public sealed partial class CreateThreadRequest /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public CreateThreadRequest( global::System.Collections.Generic.IList? messages, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestMetadata.g.verified.cs index 02113cbd6d..b05983ba59 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
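The code_interpreter tool-resource hunks above also document the empty-list default for file_ids. A small sketch of attaching files to the run's code_interpreter resources; the list element type is presumably string (the generic argument is not visible in this diff) and the file ID is a placeholder:

    // Up to 20 file IDs may be attached; file_ids defaults to an empty list.
    var codeInterpreter = new G.CreateThreadAndRunRequestToolResourcesCodeInterpreter(
        fileIds: new System.Collections.Generic.List<string> { "file-abc123" });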
/// public sealed partial class CreateThreadRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesCodeInterpreter.g.verified.cs index 68fab0b336..94fb275293 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class CreateThreadRequestToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::Newtonsoft.Json.JsonProperty("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class CreateThreadRequestToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// public CreateThreadRequestToolResourcesCodeInterpreter( global::System.Collections.Generic.IList? fileIds) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStore.g.verified.cs index 73932e1db6..35d1e85ef6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStore.g.verified.cs @@ -22,7 +22,7 @@ public sealed partial class CreateThreadRequestToolResourcesFileSearchVectorStor public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy? ChunkingStrategy { get; set; } /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -43,7 +43,7 @@ public sealed partial class CreateThreadRequestToolResourcesFileSearchVectorStor /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public CreateThreadRequestToolResourcesFileSearchVectorStore( global::System.Collections.Generic.IList? fileIds, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs index 48aee0c998..e7a956be89 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. 
This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranscriptionRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranscriptionRequest.g.verified.cs index f7bc0d0f06..8017d45503 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranscriptionRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranscriptionRequest.g.verified.cs @@ -38,17 +38,17 @@ public sealed partial class CreateTranscriptionRequest public string? Language { get; set; } /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// [global::Newtonsoft.Json.JsonProperty("prompt")] public string? Prompt { get; set; } /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json ///
[global::Newtonsoft.Json.JsonProperty("response_format")] - public global::G.CreateTranscriptionRequestResponseFormat? ResponseFormat { get; set; } + public global::G.AudioResponseFormat? ResponseFormat { get; set; } /// /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
@@ -87,10 +87,10 @@ public sealed partial class CreateTranscriptionRequest /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -107,7 +107,7 @@ public CreateTranscriptionRequest( global::G.AnyOf model, string? language, string? prompt, - global::G.CreateTranscriptionRequestResponseFormat? responseFormat, + global::G.AudioResponseFormat? responseFormat, double? temperature, global::System.Collections.Generic.IList? timestampGranularities) { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranslationRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranslationRequest.g.verified.cs index b6612a51f5..7616153e1f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranslationRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateTranslationRequest.g.verified.cs @@ -32,17 +32,17 @@ public sealed partial class CreateTranslationRequest public global::G.AnyOf Model { get; set; } = default!; /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// [global::Newtonsoft.Json.JsonProperty("prompt")] public string? Prompt { get; set; } /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json ///
[global::Newtonsoft.Json.JsonProperty("response_format")] - public string? ResponseFormat { get; set; } + public global::G.AudioResponseFormat? ResponseFormat { get; set; } /// /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
@@ -71,10 +71,10 @@ public sealed partial class CreateTranslationRequest /// Example: whisper-1 /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -86,7 +86,7 @@ public CreateTranslationRequest( string filename, global::G.AnyOf model, string? prompt, - string? responseFormat, + global::G.AudioResponseFormat? responseFormat, double? temperature) { this.File = file ?? throw new global::System.ArgumentNullException(nameof(file)); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateVectorStoreRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateVectorStoreRequest.g.verified.cs index 4c607eb9ae..bb23119f2b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateVectorStoreRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateVectorStoreRequest.g.verified.cs @@ -34,7 +34,7 @@ public sealed partial class CreateVectorStoreRequest public global::G.CreateVectorStoreRequestChunkingStrategy? ChunkingStrategy { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -61,7 +61,7 @@ public sealed partial class CreateVectorStoreRequest /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public CreateVectorStoreRequest( global::System.Collections.Generic.IList? fileIds, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateVectorStoreRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateVectorStoreRequestMetadata.g.verified.cs index 394363e070..be10115ae3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateVectorStoreRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.CreateVectorStoreRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class CreateVectorStoreRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ErrorEvent.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ErrorEvent.g.verified.cs index 9ac746b504..49247c3f9b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ErrorEvent.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ErrorEvent.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + /// Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout. /// public sealed partial class ErrorEvent { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptions.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptions.Json.g.verified.cs new file mode 100644 index 0000000000..1ba5f67b7f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptions.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.FileSearchRankingOptions.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class FileSearchRankingOptions + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.FileSearchRankingOptions? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptions.g.verified.cs new file mode 100644 index 0000000000..0ac3a5b115 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptions.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.FileSearchRankingOptions.g.cs + +#nullable enable + +namespace G +{ + /// + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
+ /// See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + ///
+ public sealed partial class FileSearchRankingOptions + { + /// + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + /// + [global::Newtonsoft.Json.JsonProperty("ranker")] + public global::G.FileSearchRankingOptionsRanker? Ranker { get; set; } + + /// + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// + [global::Newtonsoft.Json.JsonProperty("score_threshold", Required = global::Newtonsoft.Json.Required.Always)] + public double ScoreThreshold { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + /// + /// + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// + public FileSearchRankingOptions( + double scoreThreshold, + global::G.FileSearchRankingOptionsRanker? ranker) + { + this.ScoreThreshold = scoreThreshold; + this.Ranker = ranker; + } + + /// + /// Initializes a new instance of the class. + /// + public FileSearchRankingOptions() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptionsRanker.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptionsRanker.g.verified.cs new file mode 100644 index 0000000000..a0161bac61 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FileSearchRankingOptionsRanker.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.FileSearchRankingOptionsRanker.g.cs + +#nullable enable + +namespace G +{ + /// + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + /// + [global::System.Runtime.Serialization.DataContract] + public enum FileSearchRankingOptionsRanker + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="auto")] + Auto, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="default_2024_08_21")] + Default20240821, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FileSearchRankingOptionsRankerExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FileSearchRankingOptionsRanker value) + { + return value switch + { + FileSearchRankingOptionsRanker.Auto => "auto", + FileSearchRankingOptionsRanker.Default20240821 => "default_2024_08_21", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FileSearchRankingOptionsRanker? 
ToEnum(string value) + { + return value switch + { + "auto" => FileSearchRankingOptionsRanker.Auto, + "default_2024_08_21" => FileSearchRankingOptionsRanker.Default20240821, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FinetuneChatRequestInput.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FinetuneChatRequestInput.g.verified.cs index f7cac62a42..39f1178887 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FinetuneChatRequestInput.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.FinetuneChatRequestInput.g.verified.cs @@ -24,7 +24,8 @@ public sealed partial class FinetuneChatRequestInput public global::System.Collections.Generic.IList? Tools { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
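The two new files above introduce FileSearchRankingOptions and its ranker enum, including Newtonsoft-based ToJson/FromJson helpers. A short usage sketch built only from the members shown in those hunks:

    // Construct the ranking options: score_threshold is required and must be between 0 and 1.
    var options = new G.FileSearchRankingOptions(
        scoreThreshold: 0.5,
        ranker: G.FileSearchRankingOptionsRanker.Default20240821);

    // Round-trip through JSON using the generated helpers.
    string json = options.ToJson();
    G.FileSearchRankingOptions? restored = G.FileSearchRankingOptions.FromJson(json);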
+ /// Default Value: true ///
[global::Newtonsoft.Json.JsonProperty("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } @@ -50,7 +51,8 @@ public sealed partial class FinetuneChatRequestInput /// A list of tools the model may generate JSON inputs for. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// public FinetuneChatRequestInput( global::System.Collections.Generic.IList>? messages, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.GetRunStepIncludeItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.GetRunStepIncludeItem.g.verified.cs new file mode 100644 index 0000000000..40efcfe512 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.GetRunStepIncludeItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.GetRunStepIncludeItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum GetRunStepIncludeItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="step_details.tool_calls[*].file_search.results[*].content")] + StepDetailsToolCallsAnyFileSearchResultsAnyContent, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class GetRunStepIncludeItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this GetRunStepIncludeItem value) + { + return value switch + { + GetRunStepIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent => "step_details.tool_calls[*].file_search.results[*].content", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static GetRunStepIncludeItem? ToEnum(string value) + { + return value switch + { + "step_details.tool_calls[*].file_search.results[*].content" => GetRunStepIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.InputVariant3Item.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.InputVariant3Item.Json.g.verified.cs new file mode 100644 index 0000000000..affbfe4a53 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.InputVariant3Item.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.InputVariant3Item.Json.g.cs +#nullable enable + +namespace G +{ + public readonly partial struct InputVariant3Item + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.InputVariant3Item? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.InputVariant3Item.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.InputVariant3Item.g.verified.cs new file mode 100644 index 0000000000..f4880ac123 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.InputVariant3Item.g.verified.cs @@ -0,0 +1,223 @@ +//HintName: G.Models.InputVariant3Item.g.cs +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace G +{ + /// + /// + /// + public readonly partial struct InputVariant3Item : global::System.IEquatable + { + /// + /// + /// + public global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? Type { get; } + + /// + /// An object describing an image to classify. + /// +#if NET6_0_OR_GREATER + public global::G.CreateModerationRequestInputVariant3ItemVariant1? ImageUrl { get; init; } +#else + public global::G.CreateModerationRequestInputVariant3ItemVariant1? 
ImageUrl { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] +#endif + public bool IsImageUrl => ImageUrl != null; + + /// + /// + /// + public static implicit operator InputVariant3Item(global::G.CreateModerationRequestInputVariant3ItemVariant1 value) => new InputVariant3Item(value); + + /// + /// + /// + public static implicit operator global::G.CreateModerationRequestInputVariant3ItemVariant1?(InputVariant3Item @this) => @this.ImageUrl; + + /// + /// + /// + public InputVariant3Item(global::G.CreateModerationRequestInputVariant3ItemVariant1? value) + { + ImageUrl = value; + } + + /// + /// An object describing text to classify. + /// +#if NET6_0_OR_GREATER + public global::G.CreateModerationRequestInputVariant3ItemVariant2? Text { get; init; } +#else + public global::G.CreateModerationRequestInputVariant3ItemVariant2? Text { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] +#endif + public bool IsText => Text != null; + + /// + /// + /// + public static implicit operator InputVariant3Item(global::G.CreateModerationRequestInputVariant3ItemVariant2 value) => new InputVariant3Item(value); + + /// + /// + /// + public static implicit operator global::G.CreateModerationRequestInputVariant3ItemVariant2?(InputVariant3Item @this) => @this.Text; + + /// + /// + /// + public InputVariant3Item(global::G.CreateModerationRequestInputVariant3ItemVariant2? value) + { + Text = value; + } + + /// + /// + /// + public InputVariant3Item( + global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? type, + global::G.CreateModerationRequestInputVariant3ItemVariant1? imageUrl, + global::G.CreateModerationRequestInputVariant3ItemVariant2? text + ) + { + Type = type; + + ImageUrl = imageUrl; + Text = text; + } + + /// + /// + /// + public object? Object => + Text as object ?? + ImageUrl as object + ; + + /// + /// + /// + public bool Validate() + { + return IsImageUrl && !IsText || !IsImageUrl && IsText; + } + + /// + /// + /// + public TResult? Match( + global::System.Func? imageUrl = null, + global::System.Func? text = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsImageUrl && imageUrl != null) + { + return imageUrl(ImageUrl!); + } + else if (IsText && text != null) + { + return text(Text!); + } + + return default(TResult); + } + + /// + /// + /// + public void Match( + global::System.Action? imageUrl = null, + global::System.Action? text = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsImageUrl) + { + imageUrl?.Invoke(ImageUrl!); + } + else if (IsText) + { + text?.Invoke(Text!); + } + } + + /// + /// + /// + public override int GetHashCode() + { + var fields = new object?[] + { + ImageUrl, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1), + Text, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant2), + }; + const int offset = unchecked((int)2166136261); + const int prime = 16777619; + static int HashCodeAggregator(int hashCode, object? value) => value == null + ? 
(hashCode ^ 0) * prime + : (hashCode ^ value.GetHashCode()) * prime; + + return global::System.Linq.Enumerable.Aggregate(fields, offset, HashCodeAggregator); + } + + /// + /// + /// + public bool Equals(InputVariant3Item other) + { + return + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) + ; + } + + /// + /// + /// + public static bool operator ==(InputVariant3Item obj1, InputVariant3Item obj2) + { + return global::System.Collections.Generic.EqualityComparer.Default.Equals(obj1, obj2); + } + + /// + /// + /// + public static bool operator !=(InputVariant3Item obj1, InputVariant3Item obj2) + { + return !(obj1 == obj2); + } + + /// + /// + /// + public override bool Equals(object? obj) + { + return obj is InputVariant3Item o && Equals(o); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesOrder.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesOrder.g.verified.cs new file mode 100644 index 0000000000..5a53f57cea --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesOrder.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.ListFilesOrder.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: desc + /// + [global::System.Runtime.Serialization.DataContract] + public enum ListFilesOrder + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="asc")] + Asc, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="desc")] + Desc, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ListFilesOrderExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ListFilesOrder value) + { + return value switch + { + ListFilesOrder.Asc => "asc", + ListFilesOrder.Desc => "desc", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ListFilesOrder? ToEnum(string value) + { + return value switch + { + "asc" => ListFilesOrder.Asc, + "desc" => ListFilesOrder.Desc, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesResponse.g.verified.cs index ae9cb16495..fc8d8ca471 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesResponse.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesResponse.g.verified.cs @@ -9,6 +9,13 @@ namespace G ///
public sealed partial class ListFilesResponse { + /// + /// Example: list + /// + /// list + [global::Newtonsoft.Json.JsonProperty("object", Required = global::Newtonsoft.Json.Required.Always)] + public string Object { get; set; } = default!; + /// /// /// @@ -16,10 +23,25 @@ public sealed partial class ListFilesResponse public global::System.Collections.Generic.IList Data { get; set; } = default!; /// - /// + /// Example: file-abc123 + /// + /// file-abc123 + [global::Newtonsoft.Json.JsonProperty("first_id", Required = global::Newtonsoft.Json.Required.Always)] + public string FirstId { get; set; } = default!; + + /// + /// Example: file-abc456 + /// + /// file-abc456 + [global::Newtonsoft.Json.JsonProperty("last_id", Required = global::Newtonsoft.Json.Required.Always)] + public string LastId { get; set; } = default!; + + /// + /// Example: false /// - [global::Newtonsoft.Json.JsonProperty("object")] - public global::G.ListFilesResponseObject Object { get; set; } + /// false + [global::Newtonsoft.Json.JsonProperty("has_more", Required = global::Newtonsoft.Json.Required.Always)] + public bool HasMore { get; set; } = default!; /// /// Additional properties that are not explicitly defined in the schema @@ -30,14 +52,31 @@ public sealed partial class ListFilesResponse /// /// Initializes a new instance of the class. /// + /// + /// Example: list + /// /// - /// + /// + /// Example: file-abc123 + /// + /// + /// Example: file-abc456 + /// + /// + /// Example: false + /// public ListFilesResponse( + string @object, global::System.Collections.Generic.IList data, - global::G.ListFilesResponseObject @object) + string firstId, + string lastId, + bool hasMore) { + this.Object = @object ?? throw new global::System.ArgumentNullException(nameof(@object)); this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); - this.Object = @object; + this.FirstId = firstId ?? throw new global::System.ArgumentNullException(nameof(firstId)); + this.LastId = lastId ?? throw new global::System.ArgumentNullException(nameof(lastId)); + this.HasMore = hasMore; } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListModelsResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListModelsResponse.g.verified.cs index d43536fb24..f73e182697 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListModelsResponse.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListModelsResponse.g.verified.cs @@ -19,7 +19,7 @@ public sealed partial class ListModelsResponse /// /// [global::Newtonsoft.Json.JsonProperty("data", Required = global::Newtonsoft.Json.Required.Always)] - public global::System.Collections.Generic.IList Data { get; set; } = default!; + public global::System.Collections.Generic.IList Data { get; set; } = default!; /// /// Additional properties that are not explicitly defined in the schema @@ -33,7 +33,7 @@ public sealed partial class ListModelsResponse /// /// public ListModelsResponse( - global::System.Collections.Generic.IList data, + global::System.Collections.Generic.IList data, global::G.ListModelsResponseObject @object) { this.Data = data ?? 
throw new global::System.ArgumentNullException(nameof(data)); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListRunStepsIncludeItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListRunStepsIncludeItem.g.verified.cs new file mode 100644 index 0000000000..8aeb71bc45 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListRunStepsIncludeItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.ListRunStepsIncludeItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum ListRunStepsIncludeItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="step_details.tool_calls[*].file_search.results[*].content")] + StepDetailsToolCallsAnyFileSearchResultsAnyContent, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ListRunStepsIncludeItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ListRunStepsIncludeItem value) + { + return value switch + { + ListRunStepsIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent => "step_details.tool_calls[*].file_search.results[*].content", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ListRunStepsIncludeItem? ToEnum(string value) + { + return value switch + { + "step_details.tool_calls[*].file_search.results[*].content" => ListRunStepsIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageDeltaObjectDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageDeltaObjectDelta.g.verified.cs index 931fb5ac38..ae2aebb2bf 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageDeltaObjectDelta.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageDeltaObjectDelta.g.verified.cs @@ -19,7 +19,7 @@ public sealed partial class MessageDeltaObjectDelta /// The content of the message in array of text and/or images. /// [global::Newtonsoft.Json.JsonProperty("content")] - public global::System.Collections.Generic.IList? Content { get; set; } + public global::System.Collections.Generic.IList? Content { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -38,7 +38,7 @@ public sealed partial class MessageDeltaObjectDelta /// public MessageDeltaObjectDelta( global::G.MessageDeltaObjectDeltaRole? role, - global::System.Collections.Generic.IList? content) + global::System.Collections.Generic.IList? 
content) { this.Role = role; this.Content = content; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObject.g.verified.cs index ada244be5b..e4b12cc920 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObject.g.verified.cs @@ -67,7 +67,7 @@ public sealed partial class MessageObject /// The content of the message in array of text and/or images. /// [global::Newtonsoft.Json.JsonProperty("content", Required = global::Newtonsoft.Json.Required.Always)] - public global::System.Collections.Generic.IList Content { get; set; } = default!; + public global::System.Collections.Generic.IList Content { get; set; } = default!; /// /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. @@ -88,7 +88,7 @@ public sealed partial class MessageObject public global::System.Collections.Generic.IList? Attachments { get; set; } = default!; /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata", Required = global::Newtonsoft.Json.Required.Always)] public object? Metadata { get; set; } = default!; @@ -142,7 +142,7 @@ public sealed partial class MessageObject /// A list of files attached to the message, and the tools they were added to. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public MessageObject( string id, @@ -153,7 +153,7 @@ public MessageObject( global::System.DateTimeOffset? completedAt, global::System.DateTimeOffset? incompleteAt, global::G.MessageObjectRole role, - global::System.Collections.Generic.IList content, + global::System.Collections.Generic.IList content, string? assistantId, string? runId, global::System.Collections.Generic.IList? attachments, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObjectAttachment.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObjectAttachment.g.verified.cs index f9d0bab6de..ceee467b0c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObjectAttachment.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObjectAttachment.g.verified.cs @@ -19,7 +19,7 @@ public sealed partial class MessageObjectAttachment /// The tools to add this file to. 
/// [global::Newtonsoft.Json.JsonProperty("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -38,7 +38,7 @@ public sealed partial class MessageObjectAttachment /// public MessageObjectAttachment( string? fileId, - global::System.Collections.Generic.IList? tools) + global::System.Collections.Generic.IList? tools) { this.FileId = fileId; this.Tools = tools; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObjectMetadata.g.verified.cs index 4363246cd9..4aacce1ffc 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.MessageObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class MessageObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model12.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model15.Json.g.verified.cs similarity index 92% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model12.Json.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model15.Json.g.verified.cs index 938687f5f8..e35899f3e5 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model12.Json.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model15.Json.g.verified.cs @@ -1,9 +1,9 @@ -//HintName: G.Models.Model12.Json.g.cs +//HintName: G.Models.Model15.Json.g.cs #nullable enable namespace G { - public sealed partial class Model12 + public sealed partial class Model15 { /// /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. @@ -27,11 +27,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::G.Model12? FromJson( + public static global::G.Model15? FromJson( string json, global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) { - return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( json, jsonSerializerOptions); } @@ -43,14 +43,14 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( global::System.IO.Stream jsonStream, global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) { using var streamReader = new global::System.IO.StreamReader(jsonStream); using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); - return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); } } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model12.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model15.g.verified.cs similarity index 91% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model12.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model15.g.verified.cs index f2bfb99a34..d8964fc005 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model12.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.Model15.g.verified.cs @@ -1,4 +1,4 @@ -//HintName: G.Models.Model12.g.cs +//HintName: G.Models.Model15.g.cs #nullable enable @@ -7,7 +7,7 @@ namespace G /// /// Describes an OpenAI model offering that can be used with the API. /// - public sealed partial class Model12 + public sealed partial class Model15 { /// /// The model identifier, which can be referenced in the API endpoints. @@ -40,7 +40,7 @@ public sealed partial class Model12 public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// /// The model identifier, which can be referenced in the API endpoints. @@ -54,7 +54,7 @@ public sealed partial class Model12 /// /// The organization that owns the model. /// - public Model12( + public Model15( string id, global::System.DateTimeOffset created, string ownedBy, @@ -67,9 +67,9 @@ public Model12( } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. 
/// - public Model12() + public Model15() { } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequest.g.verified.cs index 7d338668da..2d4b931fe3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequest.g.verified.cs @@ -10,7 +10,7 @@ namespace G public sealed partial class ModifyAssistantRequest { /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::Newtonsoft.Json.JsonProperty("model")] public string? Model { get; set; } @@ -34,10 +34,11 @@ public sealed partial class ModifyAssistantRequest public string? Instructions { get; set; } /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] ///
[global::Newtonsoft.Json.JsonProperty("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @@ -46,13 +47,13 @@ public sealed partial class ModifyAssistantRequest public global::G.ModifyAssistantRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
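A short usage sketch of the regenerated `ModifyAssistantRequest`, now that the `temperature`/`top_p` docs spell out their semantics and defaults. This is illustrative only and assumes the usual generated shape for this class (settable properties, a parameterless constructor, and a `ToJson()` serializer partial like the ones shown for `Model15` and `PredictionContent` elsewhere in this diff); the model id is a placeholder.

```csharp
using System;
using G;

// Illustrative only: property names come from the regenerated model above;
// the parameterless constructor and ToJson() partial are assumed to follow
// the same pattern as the other generated models in this diff.
var request = new ModifyAssistantRequest
{
    Model = "gpt-4o",                          // placeholder model id
    Instructions = "You are a helpful assistant.",
    Temperature = 0.2,                         // documented default is 1
    TopP = 1.0,                                // leave nucleus sampling at its default
};

Console.WriteLine(request.ToJson());           // Newtonsoft-based serializer partial
```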
@@ -61,7 +62,8 @@ public sealed partial class ModifyAssistantRequest public double? Temperature { get; set; } /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
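The new enum helper classes in this diff (`ListFilesOrderExtensions` above, and the same pattern for `ListRunStepsIncludeItem`, `PredictionContentType`, and `ProjectRateLimitObject` below) convert to and from wire strings without reflection. A minimal round trip, using only members shown in this diff:

```csharp
using System;
using G;

// Enum -> wire string via the generated extension method.
string wire = ListFilesOrder.Desc.ToValueString();           // "desc"

// Wire string -> enum; unknown values map to null instead of throwing,
// so callers decide how to treat unexpected strings from the API.
ListFilesOrder? parsed = ListFilesOrderExtensions.ToEnum("asc");

Console.WriteLine($"{wire} / {parsed}");                     // desc / Asc
```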
@@ -70,9 +72,9 @@ public sealed partial class ModifyAssistantRequest public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
[global::Newtonsoft.Json.JsonProperty("response_format")] @@ -88,7 +90,7 @@ public sealed partial class ModifyAssistantRequest /// Initializes a new instance of the class. ///
/// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The name of the assistant. The maximum length is 256 characters. @@ -100,28 +102,30 @@ public sealed partial class ModifyAssistantRequest /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// public ModifyAssistantRequest( @@ -129,7 +133,7 @@ public ModifyAssistantRequest( string? name, string? description, string? instructions, - global::System.Collections.Generic.IList? tools, + global::System.Collections.Generic.IList? tools, global::G.ModifyAssistantRequestToolResources? toolResources, object? metadata, double? temperature, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequestMetadata.g.verified.cs index 9210931c69..0668bd4cee 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ModifyAssistantRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequestToolResourcesCodeInterpreter.g.verified.cs index 6e098a1ea0..f3b0cecaaf 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyAssistantRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class ModifyAssistantRequestToolResourcesCodeInterpreter { /// - /// Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
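All of the serializer partials touched in this diff (`Model15` above, `PredictionContent` and the `ProjectRateLimit*` models below) expose the same `ToJson` / `FromJson` / `FromJsonStreamAsync` trio. A round-trip sketch, assuming the usual generator property names (`Id`, `Created`, `OwnedBy`) where the flattened diff does not show them directly:

```csharp
using System;
using System.IO;
using System.Text;
using System.Threading.Tasks;
using G;

static class SerializationSketch
{
    public static async Task RunAsync()
    {
        // Property names are assumptions based on the generator's naming rules.
        var original = new Model15
        {
            Id = "gpt-4o",                     // placeholder identifier
            Created = DateTimeOffset.UtcNow,
            OwnedBy = "openai",
        };

        string json = original.ToJson();        // serialize with default settings
        Model15? parsed = Model15.FromJson(json);

        // Stream overload, e.g. when reading an HTTP response body directly.
        using var stream = new MemoryStream(Encoding.UTF8.GetBytes(json));
        Model15? fromStream = await Model15.FromJsonStreamAsync(stream);

        Console.WriteLine(parsed?.Id == fromStream?.Id);     // True
    }
}
```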
[global::Newtonsoft.Json.JsonProperty("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class ModifyAssistantRequestToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// public ModifyAssistantRequestToolResourcesCodeInterpreter( global::System.Collections.Generic.IList? fileIds) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyMessageRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyMessageRequest.g.verified.cs index 1e545fd650..fea17f661a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyMessageRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyMessageRequest.g.verified.cs @@ -10,7 +10,7 @@ namespace G public sealed partial class ModifyMessageRequest { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -25,7 +25,7 @@ public sealed partial class ModifyMessageRequest /// Initializes a new instance of the class. ///
/// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public ModifyMessageRequest( object? metadata) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyMessageRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyMessageRequestMetadata.g.verified.cs index a4246f946b..8e5add858d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyMessageRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyMessageRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ModifyMessageRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyRunRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyRunRequest.g.verified.cs index 0fc52c0233..35264db423 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyRunRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyRunRequest.g.verified.cs @@ -10,7 +10,7 @@ namespace G public sealed partial class ModifyRunRequest { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -25,7 +25,7 @@ public sealed partial class ModifyRunRequest /// Initializes a new instance of the class. ///
/// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public ModifyRunRequest( object? metadata) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyRunRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyRunRequestMetadata.g.verified.cs index 484a184c76..bd57dc5312 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyRunRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyRunRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ModifyRunRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequest.g.verified.cs index f22bfd76bc..90ced01842 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequest.g.verified.cs @@ -16,7 +16,7 @@ public sealed partial class ModifyThreadRequest public global::G.ModifyThreadRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -34,7 +34,7 @@ public sealed partial class ModifyThreadRequest /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public ModifyThreadRequest( global::G.ModifyThreadRequestToolResources? toolResources, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequestMetadata.g.verified.cs index 9eaa6593a6..939c6491e1 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ModifyThreadRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequestToolResourcesCodeInterpreter.g.verified.cs index 822ccbafd9..2b37299879 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ModifyThreadRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class ModifyThreadRequestToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::Newtonsoft.Json.JsonProperty("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class ModifyThreadRequestToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// public ModifyThreadRequestToolResourcesCodeInterpreter( global::System.Collections.Generic.IList? fileIds) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContent.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContent.Json.g.verified.cs new file mode 100644 index 0000000000..6c051cef8d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContent.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.PredictionContent.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class PredictionContent + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.PredictionContent? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContent.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContent.g.verified.cs new file mode 100644 index 0000000000..77f66ddc98 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContent.g.verified.cs @@ -0,0 +1,63 @@ +//HintName: G.Models.PredictionContent.g.cs + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace G +{ + /// + /// Static predicted output content, such as the content of a text file that is
+ /// being regenerated. + ///
+ public sealed partial class PredictionContent + { + /// + /// The type of the predicted content you want to provide. This type is
+ /// currently always `content`. + ///
+ [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.PredictionContentType Type { get; set; } + + /// + /// The content that should be matched when generating a model response.
+ /// If generated tokens would match this content, the entire model response
+ /// can be returned much more quickly. + ///
+ [global::Newtonsoft.Json.JsonProperty("content", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.OneOf> Content { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the predicted content you want to provide. This type is
+ /// currently always `content`. + /// + /// + /// The content that should be matched when generating a model response.
+ /// If generated tokens would match this content, the entire model response
+ /// can be returned much more quickly. + /// + public PredictionContent( + global::G.OneOf> content, + global::G.PredictionContentType type) + { + this.Content = content; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public PredictionContent() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContentType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContentType.g.verified.cs new file mode 100644 index 0000000000..c227eafe2d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.PredictionContentType.g.verified.cs @@ -0,0 +1,49 @@ +//HintName: G.Models.PredictionContentType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the predicted content you want to provide. This type is
+ /// currently always `content`. + ///
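A minimal sketch of the new predicted-outputs payload (`PredictionContent` above, plus the `PredictionContentType` enum defined just below). It assumes the generated `OneOf` wrapper on `Content` accepts a plain string through an implicit conversion, which is the generator's usual behaviour; if not, wrap the value explicitly. The file body is a placeholder.

```csharp
using System;
using G;

var prediction = new PredictionContent
{
    Type = PredictionContentType.Content,                    // currently the only value
    // Assumption: the generated OneOf wrapper converts implicitly from string.
    Content = "class Stack { /* unchanged file contents ... */ }",
};

// Same Newtonsoft-based serializer partial as the other models in this diff.
Console.WriteLine(prediction.ToJson());
```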
+ [global::System.Runtime.Serialization.DataContract] + public enum PredictionContentType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="content")] + Content, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class PredictionContentTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this PredictionContentType value) + { + return value switch + { + PredictionContentType.Content => "content", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static PredictionContentType? ToEnum(string value) + { + return value switch + { + "content" => PredictionContentType.Content, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimit.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimit.Json.g.verified.cs new file mode 100644 index 0000000000..206cabcd24 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimit.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.ProjectRateLimit.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ProjectRateLimit + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ProjectRateLimit? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimit.g.verified.cs new file mode 100644 index 0000000000..7c610dd035 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimit.g.verified.cs @@ -0,0 +1,131 @@ +//HintName: G.Models.ProjectRateLimit.g.cs + +#nullable enable + +namespace G +{ + /// + /// Represents a project rate limit config. + /// + public sealed partial class ProjectRateLimit + { + /// + /// The object type, which is always `project.rate_limit` + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.ProjectRateLimitObject Object { get; set; } + + /// + /// The identifier, which can be referenced in API endpoints. + /// + [global::Newtonsoft.Json.JsonProperty("id", Required = global::Newtonsoft.Json.Required.Always)] + public string Id { get; set; } = default!; + + /// + /// The model this rate limit applies to. + /// + [global::Newtonsoft.Json.JsonProperty("model", Required = global::Newtonsoft.Json.Required.Always)] + public string Model { get; set; } = default!; + + /// + /// The maximum requests per minute. + /// + [global::Newtonsoft.Json.JsonProperty("max_requests_per_1_minute", Required = global::Newtonsoft.Json.Required.Always)] + public int MaxRequestsPer1Minute { get; set; } = default!; + + /// + /// The maximum tokens per minute. + /// + [global::Newtonsoft.Json.JsonProperty("max_tokens_per_1_minute", Required = global::Newtonsoft.Json.Required.Always)] + public int MaxTokensPer1Minute { get; set; } = default!; + + /// + /// The maximum images per minute. Only present for relevant models. + /// + [global::Newtonsoft.Json.JsonProperty("max_images_per_1_minute")] + public int? MaxImagesPer1Minute { get; set; } + + /// + /// The maximum audio megabytes per minute. Only present for relevant models. + /// + [global::Newtonsoft.Json.JsonProperty("max_audio_megabytes_per_1_minute")] + public int? MaxAudioMegabytesPer1Minute { get; set; } + + /// + /// The maximum requests per day. Only present for relevant models. + /// + [global::Newtonsoft.Json.JsonProperty("max_requests_per_1_day")] + public int? MaxRequestsPer1Day { get; set; } + + /// + /// The maximum batch input tokens per day. Only present for relevant models. 
+ /// + [global::Newtonsoft.Json.JsonProperty("batch_1_day_max_input_tokens")] + public int? Batch1DayMaxInputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The object type, which is always `project.rate_limit` + /// + /// + /// The identifier, which can be referenced in API endpoints. + /// + /// + /// The model this rate limit applies to. + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only present for relevant models. + /// + /// + /// The maximum audio megabytes per minute. Only present for relevant models. + /// + /// + /// The maximum requests per day. Only present for relevant models. + /// + /// + /// The maximum batch input tokens per day. Only present for relevant models. + /// + public ProjectRateLimit( + string id, + string model, + int maxRequestsPer1Minute, + int maxTokensPer1Minute, + global::G.ProjectRateLimitObject @object, + int? maxImagesPer1Minute, + int? maxAudioMegabytesPer1Minute, + int? maxRequestsPer1Day, + int? batch1DayMaxInputTokens) + { + this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); + this.Model = model ?? throw new global::System.ArgumentNullException(nameof(model)); + this.MaxRequestsPer1Minute = maxRequestsPer1Minute; + this.MaxTokensPer1Minute = maxTokensPer1Minute; + this.Object = @object; + this.MaxImagesPer1Minute = maxImagesPer1Minute; + this.MaxAudioMegabytesPer1Minute = maxAudioMegabytesPer1Minute; + this.MaxRequestsPer1Day = maxRequestsPer1Day; + this.Batch1DayMaxInputTokens = batch1DayMaxInputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public ProjectRateLimit() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponse.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponse.Json.g.verified.cs new file mode 100644 index 0000000000..74168a4870 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponse.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.ProjectRateLimitListResponse.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ProjectRateLimitListResponse + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ProjectRateLimitListResponse? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponse.g.verified.cs new file mode 100644 index 0000000000..3b4398ca46 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponse.g.verified.cs @@ -0,0 +1,77 @@ +//HintName: G.Models.ProjectRateLimitListResponse.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class ProjectRateLimitListResponse + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.ProjectRateLimitListResponseObject Object { get; set; } + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("data", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList Data { get; set; } = default!; + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("first_id", Required = global::Newtonsoft.Json.Required.Always)] + public string FirstId { get; set; } = default!; + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("last_id", Required = global::Newtonsoft.Json.Required.Always)] + public string LastId { get; set; } = default!; + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("has_more", Required = global::Newtonsoft.Json.Required.Always)] + public bool HasMore { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// + /// + /// + public ProjectRateLimitListResponse( + global::System.Collections.Generic.IList data, + string firstId, + string lastId, + bool hasMore, + global::G.ProjectRateLimitListResponseObject @object) + { + this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.FirstId = firstId ?? throw new global::System.ArgumentNullException(nameof(firstId)); + this.LastId = lastId ?? throw new global::System.ArgumentNullException(nameof(lastId)); + this.HasMore = hasMore; + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public ProjectRateLimitListResponse() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponseObject.g.verified.cs new file mode 100644 index 0000000000..da646da42a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitListResponseObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.ProjectRateLimitListResponseObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum ProjectRateLimitListResponseObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="list")] + List, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ProjectRateLimitListResponseObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ProjectRateLimitListResponseObject value) + { + return value switch + { + ProjectRateLimitListResponseObject.List => "list", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ProjectRateLimitListResponseObject? ToEnum(string value) + { + return value switch + { + "list" => ProjectRateLimitListResponseObject.List, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitObject.g.verified.cs new file mode 100644 index 0000000000..58ad663148 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.ProjectRateLimitObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// The object type, which is always `project.rate_limit` + /// + [global::System.Runtime.Serialization.DataContract] + public enum ProjectRateLimitObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project.rate_limit")] + ProjectRateLimit, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ProjectRateLimitObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ProjectRateLimitObject value) + { + return value switch + { + ProjectRateLimitObject.ProjectRateLimit => "project.rate_limit", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ProjectRateLimitObject? 
ToEnum(string value) + { + return value switch + { + "project.rate_limit" => ProjectRateLimitObject.ProjectRateLimit, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitUpdateRequest.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitUpdateRequest.Json.g.verified.cs new file mode 100644 index 0000000000..4f254b54ff --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitUpdateRequest.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.ProjectRateLimitUpdateRequest.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ProjectRateLimitUpdateRequest + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ProjectRateLimitUpdateRequest? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitUpdateRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitUpdateRequest.g.verified.cs new file mode 100644 index 0000000000..b9f1704b92 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ProjectRateLimitUpdateRequest.g.verified.cs @@ -0,0 +1,98 @@ +//HintName: G.Models.ProjectRateLimitUpdateRequest.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class ProjectRateLimitUpdateRequest + { + /// + /// The maximum requests per minute. + /// + [global::Newtonsoft.Json.JsonProperty("max_requests_per_1_minute")] + public int? MaxRequestsPer1Minute { get; set; } + + /// + /// The maximum tokens per minute. + /// + [global::Newtonsoft.Json.JsonProperty("max_tokens_per_1_minute")] + public int? MaxTokensPer1Minute { get; set; } + + /// + /// The maximum images per minute. Only relevant for certain models. + /// + [global::Newtonsoft.Json.JsonProperty("max_images_per_1_minute")] + public int? MaxImagesPer1Minute { get; set; } + + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + [global::Newtonsoft.Json.JsonProperty("max_audio_megabytes_per_1_minute")] + public int? MaxAudioMegabytesPer1Minute { get; set; } + + /// + /// The maximum requests per day. Only relevant for certain models. + /// + [global::Newtonsoft.Json.JsonProperty("max_requests_per_1_day")] + public int? MaxRequestsPer1Day { get; set; } + + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + [global::Newtonsoft.Json.JsonProperty("batch_1_day_max_input_tokens")] + public int? Batch1DayMaxInputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only relevant for certain models. + /// + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + /// + /// The maximum requests per day. Only relevant for certain models. + /// + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + public ProjectRateLimitUpdateRequest( + int? maxRequestsPer1Minute, + int? maxTokensPer1Minute, + int? maxImagesPer1Minute, + int? maxAudioMegabytesPer1Minute, + int? maxRequestsPer1Day, + int? 
batch1DayMaxInputTokens) + { + this.MaxRequestsPer1Minute = maxRequestsPer1Minute; + this.MaxTokensPer1Minute = maxTokensPer1Minute; + this.MaxImagesPer1Minute = maxImagesPer1Minute; + this.MaxAudioMegabytesPer1Minute = maxAudioMegabytesPer1Minute; + this.MaxRequestsPer1Day = maxRequestsPer1Day; + this.Batch1DayMaxInputTokens = batch1DayMaxInputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public ProjectRateLimitUpdateRequest() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreate.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreate.Json.g.verified.cs new file mode 100644 index 0000000000..3bd23b9a29 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreate.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventConversationItemCreate.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventConversationItemCreate + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventConversationItemCreate? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreate.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreate.g.verified.cs new file mode 100644 index 0000000000..aae65c83d6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreate.g.verified.cs @@ -0,0 +1,87 @@ +//HintName: G.Models.RealtimeClientEventConversationItemCreate.g.cs + +#nullable enable + +namespace G +{ + /// + /// Add a new Item to the Conversation's context, including messages, function
+ /// calls, and function call responses. This event can be used both to populate a
+ /// "history" of the conversation and to add new items mid-stream, but has the
+ /// current limitation that it cannot populate assistant audio messages.
+ /// If successful, the server will respond with a `conversation.item.created`
+ /// event, otherwise an `error` event will be sent. + ///
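+ // Illustrative aside (not part of the generated snapshot): a minimal sketch of constructing
+ // this client event with the generated constructor and the Newtonsoft.Json helpers defined
+ // below. The RealtimeConversationItem value is assumed to have been built elsewhere; passing
+ // null throws, since `item` is required.
+ //
+ //   var create = new RealtimeClientEventConversationItemCreate(
+ //       item: conversationItem,            // a RealtimeConversationItem (shape not shown here)
+ //       eventId: "evt_client_001",         // hypothetical optional client-generated ID
+ //       type: RealtimeClientEventConversationItemCreateType.ConversationItemCreate,
+ //       previousItemId: null);             // null appends to the end of the conversation
+ //   string payload = create.ToJson();      // JsonConvert.SerializeObject under the hood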
+ public sealed partial class RealtimeClientEventConversationItemCreate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `conversation.item.create`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventConversationItemCreateType Type { get; set; } + + /// + /// The ID of the preceding item after which the new item will be inserted.
+ /// If not set, the new item will be appended to the end of the conversation.
+ /// If set, it allows an item to be inserted mid-conversation. If the ID
+ /// cannot be found, an error will be returned and the item will not be added. + ///
+ [global::Newtonsoft.Json.JsonProperty("previous_item_id")] + public string? PreviousItemId { get; set; } + + /// + /// The item to add to the conversation. + /// + [global::Newtonsoft.Json.JsonProperty("item", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeConversationItem Item { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `conversation.item.create`. + /// + /// + /// The ID of the preceding item after which the new item will be inserted.
+ /// If not set, the new item will be appended to the end of the conversation.
+ /// If set, it allows an item to be inserted mid-conversation. If the ID
+ /// cannot be found, an error will be returned and the item will not be added. + /// + /// + /// The item to add to the conversation. + /// + public RealtimeClientEventConversationItemCreate( + global::G.RealtimeConversationItem item, + string? eventId, + global::G.RealtimeClientEventConversationItemCreateType type, + string? previousItemId) + { + this.Item = item ?? throw new global::System.ArgumentNullException(nameof(item)); + this.EventId = eventId; + this.Type = type; + this.PreviousItemId = previousItemId; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventConversationItemCreate() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreateType.g.verified.cs new file mode 100644 index 0000000000..12a0a6c63a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemCreateType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventConversationItemCreateType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.create`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventConversationItemCreateType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.item.create")] + ConversationItemCreate, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventConversationItemCreateTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventConversationItemCreateType value) + { + return value switch + { + RealtimeClientEventConversationItemCreateType.ConversationItemCreate => "conversation.item.create", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventConversationItemCreateType? ToEnum(string value) + { + return value switch + { + "conversation.item.create" => RealtimeClientEventConversationItemCreateType.ConversationItemCreate, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDelete.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDelete.Json.g.verified.cs new file mode 100644 index 0000000000..6b7023566a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDelete.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventConversationItemDelete.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventConversationItemDelete + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventConversationItemDelete? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDelete.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDelete.g.verified.cs new file mode 100644 index 0000000000..c73cdc67ae --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDelete.g.verified.cs @@ -0,0 +1,68 @@ +//HintName: G.Models.RealtimeClientEventConversationItemDelete.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event when you want to remove any item from the conversation
+ /// history. The server will respond with a `conversation.item.deleted` event,
+ /// unless the item does not exist in the conversation history, in which case the
+ /// server will respond with an error. + ///
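+ // Illustrative aside (not part of the generated snapshot): removing an item by ID might look
+ // like the following; "item_abc123" is a hypothetical item identifier.
+ //
+ //   var del = new RealtimeClientEventConversationItemDelete(
+ //       itemId: "item_abc123",
+ //       eventId: null,                     // optional client-generated ID
+ //       type: RealtimeClientEventConversationItemDeleteType.ConversationItemDelete);
+ //   string payload = del.ToJson();         // send over the realtime connection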
+ public sealed partial class RealtimeClientEventConversationItemDelete + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `conversation.item.delete`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventConversationItemDeleteType Type { get; set; } + + /// + /// The ID of the item to delete. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `conversation.item.delete`. + /// + /// + /// The ID of the item to delete. + /// + public RealtimeClientEventConversationItemDelete( + string itemId, + string? eventId, + global::G.RealtimeClientEventConversationItemDeleteType type) + { + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventConversationItemDelete() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDeleteType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDeleteType.g.verified.cs new file mode 100644 index 0000000000..1f8c05024b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemDeleteType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventConversationItemDeleteType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.delete`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventConversationItemDeleteType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.item.delete")] + ConversationItemDelete, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventConversationItemDeleteTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventConversationItemDeleteType value) + { + return value switch + { + RealtimeClientEventConversationItemDeleteType.ConversationItemDelete => "conversation.item.delete", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventConversationItemDeleteType? 
ToEnum(string value) + { + return value switch + { + "conversation.item.delete" => RealtimeClientEventConversationItemDeleteType.ConversationItemDelete, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncate.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncate.Json.g.verified.cs new file mode 100644 index 0000000000..7fb99aa204 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncate.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventConversationItemTruncate.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventConversationItemTruncate + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventConversationItemTruncate? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncate.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncate.g.verified.cs new file mode 100644 index 0000000000..b5040f8ce0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncate.g.verified.cs @@ -0,0 +1,101 @@ +//HintName: G.Models.RealtimeClientEventConversationItemTruncate.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to truncate a previous assistant message’s audio. The server
+ /// will produce audio faster than realtime, so this event is useful when the user
+ /// interrupts to truncate audio that has already been sent to the client but not
+ /// yet played. This will synchronize the server's understanding of the audio with
+ /// the client's playback.
+ /// Truncating audio will delete the server-side text transcript to ensure there
+ /// is no text in the context that hasn't been heard by the user.
+ /// If successful, the server will respond with a `conversation.item.truncated`
+ /// event. + ///
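+ // Illustrative aside (not part of the generated snapshot): truncating already-sent assistant
+ // audio at a hypothetical playback position of 1500 ms; content_index is always 0 per the
+ // property docs below.
+ //
+ //   var truncate = new RealtimeClientEventConversationItemTruncate(
+ //       itemId: "item_assistant_42",       // hypothetical assistant message item ID
+ //       contentIndex: 0,
+ //       audioEndMs: 1500,
+ //       eventId: null,
+ //       type: RealtimeClientEventConversationItemTruncateType.ConversationItemTruncate);
+ //   string payload = truncate.ToJson();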
+ public sealed partial class RealtimeClientEventConversationItemTruncate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `conversation.item.truncate`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventConversationItemTruncateType Type { get; set; } + + /// + /// The ID of the assistant message item to truncate. Only assistant message
+ /// items can be truncated. + ///
+ [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the content part to truncate. Set this to 0. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// Inclusive duration up to which audio is truncated, in milliseconds. If
+ /// the audio_end_ms is greater than the actual audio duration, the server
+ /// will respond with an error. + ///
+ [global::Newtonsoft.Json.JsonProperty("audio_end_ms", Required = global::Newtonsoft.Json.Required.Always)] + public int AudioEndMs { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `conversation.item.truncate`. + /// + /// + /// The ID of the assistant message item to truncate. Only assistant message
+ /// items can be truncated. + /// + /// + /// The index of the content part to truncate. Set this to 0. + /// + /// + /// Inclusive duration up to which audio is truncated, in milliseconds. If
+ /// the audio_end_ms is greater than the actual audio duration, the server
+ /// will respond with an error. + /// + public RealtimeClientEventConversationItemTruncate( + string itemId, + int contentIndex, + int audioEndMs, + string? eventId, + global::G.RealtimeClientEventConversationItemTruncateType type) + { + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.ContentIndex = contentIndex; + this.AudioEndMs = audioEndMs; + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventConversationItemTruncate() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncateType.g.verified.cs new file mode 100644 index 0000000000..9e9cbb1346 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventConversationItemTruncateType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventConversationItemTruncateType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.truncate`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventConversationItemTruncateType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.item.truncate")] + ConversationItemTruncate, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventConversationItemTruncateTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventConversationItemTruncateType value) + { + return value switch + { + RealtimeClientEventConversationItemTruncateType.ConversationItemTruncate => "conversation.item.truncate", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventConversationItemTruncateType? ToEnum(string value) + { + return value switch + { + "conversation.item.truncate" => RealtimeClientEventConversationItemTruncateType.ConversationItemTruncate, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.Json.g.verified.cs new file mode 100644 index 0000000000..0f74c4f8b6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferAppend.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventInputAudioBufferAppend + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventInputAudioBufferAppend? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.g.verified.cs new file mode 100644 index 0000000000..46d7652ebb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.g.verified.cs @@ -0,0 +1,75 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferAppend.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to append audio bytes to the input audio buffer. The audio
+ /// buffer is temporary storage you can write to and later commit. In Server VAD
+ /// mode, the audio buffer is used to detect speech and the server will decide
+ /// when to commit. When Server VAD is disabled, you must commit the audio buffer
+ /// manually.
+ /// The client may choose how much audio to place in each event up to a maximum
+ /// of 15 MiB; for example, streaming smaller chunks from the client may allow the
+ /// VAD to be more responsive. Unlike most other client events, the server will
+ /// not send a confirmation response to this event. + ///
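+ // Illustrative aside (not part of the generated snapshot): the `audio` property carries
+ // base64-encoded bytes, so a chunk of raw audio (byte[] pcmChunk, assumed to already match
+ // the session's input_audio_format) could be wrapped like this:
+ //
+ //   var append = new RealtimeClientEventInputAudioBufferAppend(
+ //       audio: global::System.Convert.ToBase64String(pcmChunk),
+ //       eventId: null,
+ //       type: RealtimeClientEventInputAudioBufferAppendType.InputAudioBufferAppend);
+ //   string payload = append.ToJson();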
+ public sealed partial class RealtimeClientEventInputAudioBufferAppend + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.append`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventInputAudioBufferAppendType Type { get; set; } + + /// + /// Base64-encoded audio bytes. This must be in the format specified by the
+ /// `input_audio_format` field in the session configuration. + ///
+ [global::Newtonsoft.Json.JsonProperty("audio", Required = global::Newtonsoft.Json.Required.Always)] + public string Audio { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `input_audio_buffer.append`. + /// + /// + /// Base64-encoded audio bytes. This must be in the format specified by the
+ /// `input_audio_format` field in the session configuration. + /// + public RealtimeClientEventInputAudioBufferAppend( + string audio, + string? eventId, + global::G.RealtimeClientEventInputAudioBufferAppendType type) + { + this.Audio = audio ?? throw new global::System.ArgumentNullException(nameof(audio)); + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventInputAudioBufferAppend() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs new file mode 100644 index 0000000000..3aa2a5d6fe --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferAppendType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.append`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventInputAudioBufferAppendType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio_buffer.append")] + InputAudioBufferAppend, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventInputAudioBufferAppendTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventInputAudioBufferAppendType value) + { + return value switch + { + RealtimeClientEventInputAudioBufferAppendType.InputAudioBufferAppend => "input_audio_buffer.append", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventInputAudioBufferAppendType? ToEnum(string value) + { + return value switch + { + "input_audio_buffer.append" => RealtimeClientEventInputAudioBufferAppendType.InputAudioBufferAppend, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.Json.g.verified.cs new file mode 100644 index 0000000000..a6b8308fac --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferClear.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventInputAudioBufferClear + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventInputAudioBufferClear? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.g.verified.cs new file mode 100644 index 0000000000..0ab5629cba --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferClear.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to clear the audio bytes in the buffer. The server will
+ /// respond with an `input_audio_buffer.cleared` event. + ///
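+ // Illustrative aside (not part of the generated snapshot): this event carries no payload
+ // beyond its type, so the parameterless constructor plus object initializer also works:
+ //
+ //   var clear = new RealtimeClientEventInputAudioBufferClear
+ //   {
+ //       Type = RealtimeClientEventInputAudioBufferClearType.InputAudioBufferClear,
+ //   };
+ //   string payload = clear.ToJson();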
+ public sealed partial class RealtimeClientEventInputAudioBufferClear + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.clear`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventInputAudioBufferClearType Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `input_audio_buffer.clear`. + /// + public RealtimeClientEventInputAudioBufferClear( + string? eventId, + global::G.RealtimeClientEventInputAudioBufferClearType type) + { + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventInputAudioBufferClear() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClearType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClearType.g.verified.cs new file mode 100644 index 0000000000..05ca398665 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferClearType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferClearType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.clear`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventInputAudioBufferClearType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio_buffer.clear")] + InputAudioBufferClear, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventInputAudioBufferClearTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventInputAudioBufferClearType value) + { + return value switch + { + RealtimeClientEventInputAudioBufferClearType.InputAudioBufferClear => "input_audio_buffer.clear", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventInputAudioBufferClearType? 
ToEnum(string value) + { + return value switch + { + "input_audio_buffer.clear" => RealtimeClientEventInputAudioBufferClearType.InputAudioBufferClear, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.Json.g.verified.cs new file mode 100644 index 0000000000..478eaffe96 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferCommit.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventInputAudioBufferCommit + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventInputAudioBufferCommit? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.g.verified.cs new file mode 100644 index 0000000000..b352179f84 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferCommit.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to commit the user input audio buffer, which will create a
+ /// new user message item in the conversation. This event will produce an error
+ /// if the input audio buffer is empty. When in Server VAD mode, the client does
+ /// not need to send this event; the server will commit the audio buffer
+ /// automatically.
+ /// Committing the input audio buffer will trigger input audio transcription
+ /// (if enabled in session configuration), but it will not create a response
+ /// from the model. The server will respond with an `input_audio_buffer.committed`
+ /// event. + ///
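For illustration only (a usage sketch, not part of the generated snapshot): a client built on the generated types above could emit this event roughly as follows. The StringEnumConverter is an assumption made here so that the EnumMember wire values (such as `input_audio_buffer.commit`) are written instead of integer enum values; the snapshot itself does not show how enum converters are wired up.

// Hypothetical usage sketch; the G.* types are the generated classes in this diff.
var settings = new Newtonsoft.Json.JsonSerializerSettings
{
    // Assumed converter so enum members serialize as their EnumMember wire strings.
    Converters = { new Newtonsoft.Json.Converters.StringEnumConverter() },
};

var commit = new G.RealtimeClientEventInputAudioBufferCommit
{
    EventId = "event_123", // optional client-generated ID
    Type = G.RealtimeClientEventInputAudioBufferCommitType.InputAudioBufferCommit,
};

// ToJson comes from the generated Json partial shown earlier in this diff.
string payload = commit.ToJson(settings);
// payload is roughly: {"event_id":"event_123","type":"input_audio_buffer.commit"}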
+ public sealed partial class RealtimeClientEventInputAudioBufferCommit + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.commit`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventInputAudioBufferCommitType Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `input_audio_buffer.commit`. + /// + public RealtimeClientEventInputAudioBufferCommit( + string? eventId, + global::G.RealtimeClientEventInputAudioBufferCommitType type) + { + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventInputAudioBufferCommit() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs new file mode 100644 index 0000000000..d7e50673c8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferCommitType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.commit`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventInputAudioBufferCommitType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio_buffer.commit")] + InputAudioBufferCommit, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventInputAudioBufferCommitTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventInputAudioBufferCommitType value) + { + return value switch + { + RealtimeClientEventInputAudioBufferCommitType.InputAudioBufferCommit => "input_audio_buffer.commit", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventInputAudioBufferCommitType? 
ToEnum(string value) + { + return value switch + { + "input_audio_buffer.commit" => RealtimeClientEventInputAudioBufferCommitType.InputAudioBufferCommit, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancel.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancel.Json.g.verified.cs new file mode 100644 index 0000000000..3853381825 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancel.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventResponseCancel.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventResponseCancel + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventResponseCancel? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancel.g.verified.cs new file mode 100644 index 0000000000..32b7b642bb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancel.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventResponseCancel.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to cancel an in-progress response. The server will respond
+ /// with a `response.cancelled` event or an error if there is no response to
+ /// cancel. + ///
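A brief illustrative note (not part of the snapshot): the generated `*Extensions` classes in this diff convert between enum members and their wire strings without reflection, and `ToEnum` returns null for unrecognized input rather than throwing.

// Hypothetical usage of the generated extension helpers.
string wire = G.RealtimeClientEventResponseCancelTypeExtensions
    .ToValueString(G.RealtimeClientEventResponseCancelType.ResponseCancel); // "response.cancel"

G.RealtimeClientEventResponseCancelType? known =
    G.RealtimeClientEventResponseCancelTypeExtensions.ToEnum("response.cancel"); // ResponseCancel
G.RealtimeClientEventResponseCancelType? unknown =
    G.RealtimeClientEventResponseCancelTypeExtensions.ToEnum("not.a.known.type"); // null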
+ public sealed partial class RealtimeClientEventResponseCancel + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `response.cancel`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventResponseCancelType Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `response.cancel`. + /// + public RealtimeClientEventResponseCancel( + string? eventId, + global::G.RealtimeClientEventResponseCancelType type) + { + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventResponseCancel() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancelType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancelType.g.verified.cs new file mode 100644 index 0000000000..f7a7549331 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCancelType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventResponseCancelType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.cancel`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventResponseCancelType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.cancel")] + ResponseCancel, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventResponseCancelTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventResponseCancelType value) + { + return value switch + { + RealtimeClientEventResponseCancelType.ResponseCancel => "response.cancel", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventResponseCancelType? 
ToEnum(string value) + { + return value switch + { + "response.cancel" => RealtimeClientEventResponseCancelType.ResponseCancel, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreate.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreate.Json.g.verified.cs new file mode 100644 index 0000000000..669ece243b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreate.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventResponseCreate.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventResponseCreate + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventResponseCreate? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreate.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreate.g.verified.cs new file mode 100644 index 0000000000..77ac9fa5c6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreate.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.RealtimeClientEventResponseCreate.g.cs + +#nullable enable + +namespace G +{ + /// + /// This event instructs the server to create a Response, which means triggering
+ /// model inference. When in Server VAD mode, the server will create Responses
+ /// automatically.
+ /// A Response will include at least one Item, and may have two, in which case
+ /// the second will be a function call. These Items will be appended to the
+ /// conversation history.
+ /// The server will respond with a `response.created` event, events for Items
+ /// and content created, and finally a `response.done` event to indicate the
+ /// Response is complete.
+ /// The `response.create` event includes inference configuration like
+ /// `instructions` and `temperature`. These fields will override the Session's
+ /// configuration for this Response only. + ///
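For illustration only (not part of the snapshot), a per-response override could be issued roughly as sketched below. RealtimeSession is referenced but not defined in this section, so the empty initializer, the parameterless constructor, and the idea that override fields (instructions, temperature, and so on) live on it are assumptions drawn from the surrounding descriptions.

// Hypothetical usage sketch for issuing a response.create event.
var create = new G.RealtimeClientEventResponseCreate
{
    EventId = "event_234",
    Type = G.RealtimeClientEventResponseCreateType.ResponseCreate,
    // Per-response inference overrides would be set on this object; its
    // properties are defined by G.RealtimeSession, which is not shown here.
    Response = new G.RealtimeSession(),
};

string payload = create.ToJson(new Newtonsoft.Json.JsonSerializerSettings
{
    Converters = { new Newtonsoft.Json.Converters.StringEnumConverter() }, // assumed, as in the earlier sketch
});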
+ public sealed partial class RealtimeClientEventResponseCreate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `response.create`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventResponseCreateType Type { get; set; } + + /// + /// Realtime session object configuration. + /// + [global::Newtonsoft.Json.JsonProperty("response", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeSession Response { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `response.create`. + /// + /// + /// Realtime session object configuration. + /// + public RealtimeClientEventResponseCreate( + global::G.RealtimeSession response, + string? eventId, + global::G.RealtimeClientEventResponseCreateType type) + { + this.Response = response ?? throw new global::System.ArgumentNullException(nameof(response)); + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventResponseCreate() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreateType.g.verified.cs new file mode 100644 index 0000000000..394e779066 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventResponseCreateType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventResponseCreateType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.create`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventResponseCreateType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.create")] + ResponseCreate, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventResponseCreateTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventResponseCreateType value) + { + return value switch + { + RealtimeClientEventResponseCreateType.ResponseCreate => "response.create", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventResponseCreateType? 
ToEnum(string value) + { + return value switch + { + "response.create" => RealtimeClientEventResponseCreateType.ResponseCreate, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdate.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdate.Json.g.verified.cs new file mode 100644 index 0000000000..2f90d17a7a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdate.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeClientEventSessionUpdate.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventSessionUpdate + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventSessionUpdate? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdate.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdate.g.verified.cs new file mode 100644 index 0000000000..3047fff9a1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdate.g.verified.cs @@ -0,0 +1,70 @@ +//HintName: G.Models.RealtimeClientEventSessionUpdate.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to update the session’s default configuration. The client may
+ /// send this event at any time to update the session configuration, and any
+ /// field may be updated at any time, except for "voice". The server will respond
+ /// with a `session.updated` event that shows the full effective configuration.
+ /// Only fields that are present are updated; thus the correct way to clear a
+ /// field like "instructions" is to pass an empty string. + ///
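A minimal sketch (not part of the snapshot) of a session.update round-trip through the generated ToJson/FromJson helpers. Which RealtimeSession fields exist (for example an instructions string that can be cleared by passing "") is taken from the description above rather than from this diff, so the empty initializer stands in for them; the StringEnumConverter is the same assumption as in the earlier sketches.

// Hypothetical round-trip through the generated JSON helpers.
var settings = new Newtonsoft.Json.JsonSerializerSettings
{
    Converters = { new Newtonsoft.Json.Converters.StringEnumConverter() }, // assumed converter
};

var update = new G.RealtimeClientEventSessionUpdate
{
    Type = G.RealtimeClientEventSessionUpdateType.SessionUpdate,
    Session = new G.RealtimeSession(), // only the fields set here would be updated server-side
};

string json = update.ToJson(settings);
G.RealtimeClientEventSessionUpdate? parsed =
    G.RealtimeClientEventSessionUpdate.FromJson(json, settings);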
+ public sealed partial class RealtimeClientEventSessionUpdate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `session.update`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeClientEventSessionUpdateType Type { get; set; } + + /// + /// Realtime session object configuration. + /// + [global::Newtonsoft.Json.JsonProperty("session", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeSession Session { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `session.update`. + /// + /// + /// Realtime session object configuration. + /// + public RealtimeClientEventSessionUpdate( + global::G.RealtimeSession session, + string? eventId, + global::G.RealtimeClientEventSessionUpdateType type) + { + this.Session = session ?? throw new global::System.ArgumentNullException(nameof(session)); + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventSessionUpdate() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdateType.g.verified.cs new file mode 100644 index 0000000000..292396a2c5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeClientEventSessionUpdateType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeClientEventSessionUpdateType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `session.update`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeClientEventSessionUpdateType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="session.update")] + SessionUpdate, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventSessionUpdateTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventSessionUpdateType value) + { + return value switch + { + RealtimeClientEventSessionUpdateType.SessionUpdate => "session.update", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventSessionUpdateType? 
ToEnum(string value) + { + return value switch + { + "session.update" => RealtimeClientEventSessionUpdateType.SessionUpdate, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItem.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItem.Json.g.verified.cs new file mode 100644 index 0000000000..fc34dea873 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItem.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeConversationItem.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeConversationItem + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeConversationItem? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItem.g.verified.cs new file mode 100644 index 0000000000..4e5633a731 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItem.g.verified.cs @@ -0,0 +1,166 @@ +//HintName: G.Models.RealtimeConversationItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// The item to add to the conversation. + /// + public sealed partial class RealtimeConversationItem + { + /// + /// The unique ID of the item, this can be generated by the client to help
+ /// manage server-side context, but is not required because the server will
+ /// generate one if not provided. + ///
+ [global::Newtonsoft.Json.JsonProperty("id")] + public string? Id { get; set; } + + /// + /// The type of the item (`message`, `function_call`, `function_call_output`). + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeConversationItemType? Type { get; set; } + + /// + /// Identifier for the API object being returned - always `realtime.item`. + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.RealtimeConversationItemObject? Object { get; set; } + + /// + /// The status of the item (`completed`, `incomplete`). These have no effect
+ /// on the conversation, but are accepted for consistency with the
+ /// `conversation.item.created` event. + ///
+ [global::Newtonsoft.Json.JsonProperty("status")] + public global::G.RealtimeConversationItemStatus? Status { get; set; } + + /// + /// The role of the message sender (`user`, `assistant`, `system`), only
+ /// applicable for `message` items. + ///
+ [global::Newtonsoft.Json.JsonProperty("role")] + public global::G.RealtimeConversationItemRole? Role { get; set; } + + /// + /// The content of the message, applicable for `message` items.
+ /// - Message items of role `system` support only `input_text` content
+ /// - Message items of role `user` support `input_text` and `input_audio`
+ /// content
+ /// - Message items of role `assistant` support `text` content. + ///
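To make the role/content rules above concrete, here is an illustrative sketch (not part of the snapshot) of a user message item with one input_text part. The element type of Content is assumed to be RealtimeConversationItemContentItem, since the generic parameter is not visible in this flattened diff.

// Hypothetical sketch of a user message item carrying a single input_text part.
var item = new G.RealtimeConversationItem
{
    Type = G.RealtimeConversationItemType.Message,
    Role = G.RealtimeConversationItemRole.User, // user items accept input_text and input_audio
    Status = G.RealtimeConversationItemStatus.Completed,
    Content = new System.Collections.Generic.List<G.RealtimeConversationItemContentItem>
    {
        new G.RealtimeConversationItemContentItem
        {
            Type = G.RealtimeConversationItemContentItemType.InputText,
            Text = "Hello, how are you?",
        },
    },
};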
+ [global::Newtonsoft.Json.JsonProperty("content")] + public global::System.Collections.Generic.IList? Content { get; set; } + + /// + /// The ID of the function call (for `function_call` and
+ /// `function_call_output` items). If passed on a `function_call_output`
+ /// item, the server will check that a `function_call` item with the same
+ /// ID exists in the conversation history. + ///
+ [global::Newtonsoft.Json.JsonProperty("call_id")] + public string? CallId { get; set; } + + /// + /// The name of the function being called (for `function_call` items). + /// + [global::Newtonsoft.Json.JsonProperty("name")] + public string? Name { get; set; } + + /// + /// The arguments of the function call (for `function_call` items). + /// + [global::Newtonsoft.Json.JsonProperty("arguments")] + public string? Arguments { get; set; } + + /// + /// The output of the function call (for `function_call_output` items). + /// + [global::Newtonsoft.Json.JsonProperty("output")] + public string? Output { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the item, this can be generated by the client to help
+ /// manage server-side context, but is not required because the server will
+ /// generate one if not provided. + /// + /// + /// The type of the item (`message`, `function_call`, `function_call_output`). + /// + /// + /// Identifier for the API object being returned - always `realtime.item`. + /// + /// + /// The status of the item (`completed`, `incomplete`). These have no effect
+ /// on the conversation, but are accepted for consistency with the
+ /// `conversation.item.created` event. + /// + /// + /// The role of the message sender (`user`, `assistant`, `system`), only
+ /// applicable for `message` items. + /// + /// + /// The content of the message, applicable for `message` items.
+ /// - Message items of role `system` support only `input_text` content
+ /// - Message items of role `user` support `input_text` and `input_audio`
+ /// content
+ /// - Message items of role `assistant` support `text` content. + /// + /// + /// The ID of the function call (for `function_call` and
+ /// `function_call_output` items). If passed on a `function_call_output`
+ /// item, the server will check that a `function_call` item with the same
+ /// ID exists in the conversation history. + /// + /// + /// The name of the function being called (for `function_call` items). + /// + /// + /// The arguments of the function call (for `function_call` items). + /// + /// + /// The output of the function call (for `function_call_output` items). + /// + public RealtimeConversationItem( + string? id, + global::G.RealtimeConversationItemType? type, + global::G.RealtimeConversationItemObject? @object, + global::G.RealtimeConversationItemStatus? status, + global::G.RealtimeConversationItemRole? role, + global::System.Collections.Generic.IList? content, + string? callId, + string? name, + string? arguments, + string? output) + { + this.Id = id; + this.Type = type; + this.Object = @object; + this.Status = status; + this.Role = role; + this.Content = content; + this.CallId = callId; + this.Name = name; + this.Arguments = arguments; + this.Output = output; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeConversationItem() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItem.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItem.Json.g.verified.cs new file mode 100644 index 0000000000..dbf6e9c7d6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItem.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeConversationItemContentItem.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeConversationItemContentItem + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeConversationItemContentItem? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItem.g.verified.cs new file mode 100644 index 0000000000..ddaf7f1f12 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItem.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.RealtimeConversationItemContentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class RealtimeConversationItemContentItem + { + /// + /// The content type (`input_text`, `input_audio`, `text`). + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeConversationItemContentItemType? Type { get; set; } + + /// + /// The text content, used for `input_text` and `text` content types. + /// + [global::Newtonsoft.Json.JsonProperty("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio bytes, used for `input_audio` content type. + /// + [global::Newtonsoft.Json.JsonProperty("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio, used for `input_audio` content type. + /// + [global::Newtonsoft.Json.JsonProperty("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The content type (`input_text`, `input_audio`, `text`). + /// + /// + /// The text content, used for `input_text` and `text` content types. + /// + /// + /// Base64-encoded audio bytes, used for `input_audio` content type. + /// + /// + /// The transcript of the audio, used for `input_audio` content type. + /// + public RealtimeConversationItemContentItem( + global::G.RealtimeConversationItemContentItemType? type, + string? 
text, + string? audio, + string? transcript) + { + this.Type = type; + this.Text = text; + this.Audio = audio; + this.Transcript = transcript; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeConversationItemContentItem() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItemType.g.verified.cs new file mode 100644 index 0000000000..7d33a810d3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemContentItemType.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.RealtimeConversationItemContentItemType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The content type (`input_text`, `input_audio`, `text`). + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeConversationItemContentItemType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio")] + InputAudio, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_text")] + InputText, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemContentItemTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemContentItemType value) + { + return value switch + { + RealtimeConversationItemContentItemType.InputAudio => "input_audio", + RealtimeConversationItemContentItemType.InputText => "input_text", + RealtimeConversationItemContentItemType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemContentItemType? ToEnum(string value) + { + return value switch + { + "input_audio" => RealtimeConversationItemContentItemType.InputAudio, + "input_text" => RealtimeConversationItemContentItemType.InputText, + "text" => RealtimeConversationItemContentItemType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemObject.g.verified.cs new file mode 100644 index 0000000000..f4fe7b7b3c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeConversationItemObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// Identifier for the API object being returned - always `realtime.item`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeConversationItemObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="realtime.item")] + RealtimeItem, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemObjectExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this RealtimeConversationItemObject value) + { + return value switch + { + RealtimeConversationItemObject.RealtimeItem => "realtime.item", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemObject? ToEnum(string value) + { + return value switch + { + "realtime.item" => RealtimeConversationItemObject.RealtimeItem, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemRole.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemRole.g.verified.cs new file mode 100644 index 0000000000..78eb5071c0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemRole.g.verified.cs @@ -0,0 +1,63 @@ +//HintName: G.Models.RealtimeConversationItemRole.g.cs + +#nullable enable + +namespace G +{ + /// + /// The role of the message sender (`user`, `assistant`, `system`), only
+ /// applicable for `message` items. + ///
+ [global::System.Runtime.Serialization.DataContract] + public enum RealtimeConversationItemRole + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="user")] + User, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="assistant")] + Assistant, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="systems")] + Systems, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemRoleExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemRole value) + { + return value switch + { + RealtimeConversationItemRole.User => "user", + RealtimeConversationItemRole.Assistant => "assistant", + RealtimeConversationItemRole.Systems => "systems", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemRole? ToEnum(string value) + { + return value switch + { + "user" => RealtimeConversationItemRole.User, + "assistant" => RealtimeConversationItemRole.Assistant, + "systems" => RealtimeConversationItemRole.Systems, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemStatus.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemStatus.g.verified.cs new file mode 100644 index 0000000000..c6470781ab --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemStatus.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: G.Models.RealtimeConversationItemStatus.g.cs + +#nullable enable + +namespace G +{ + /// + /// The status of the item (`completed`, `incomplete`). These have no effect
+ /// on the conversation, but are accepted for consistency with the
+ /// `conversation.item.created` event. + ///
+ [global::System.Runtime.Serialization.DataContract] + public enum RealtimeConversationItemStatus + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="completed")] + Completed, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="incomplete")] + Incomplete, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemStatusExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemStatus value) + { + return value switch + { + RealtimeConversationItemStatus.Completed => "completed", + RealtimeConversationItemStatus.Incomplete => "incomplete", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemStatus? ToEnum(string value) + { + return value switch + { + "completed" => RealtimeConversationItemStatus.Completed, + "incomplete" => RealtimeConversationItemStatus.Incomplete, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemType.g.verified.cs new file mode 100644 index 0000000000..741295f57b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeConversationItemType.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.RealtimeConversationItemType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the item (`message`, `function_call`, `function_call_output`). + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeConversationItemType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="message")] + Message, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="function_call")] + FunctionCall, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="function_call_output")] + FunctionCallOutput, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemType value) + { + return value switch + { + RealtimeConversationItemType.Message => "message", + RealtimeConversationItemType.FunctionCall => "function_call", + RealtimeConversationItemType.FunctionCallOutput => "function_call_output", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemType? 
ToEnum(string value) + { + return value switch + { + "message" => RealtimeConversationItemType.Message, + "function_call" => RealtimeConversationItemType.FunctionCall, + "function_call_output" => RealtimeConversationItemType.FunctionCallOutput, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponse.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponse.Json.g.verified.cs new file mode 100644 index 0000000000..3269e6eb89 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponse.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeResponse.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponse + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponse? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponse.g.verified.cs new file mode 100644 index 0000000000..b65ba593c6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponse.g.verified.cs @@ -0,0 +1,106 @@ +//HintName: G.Models.RealtimeResponse.g.cs + +#nullable enable + +namespace G +{ + /// + /// The response resource. + /// + public sealed partial class RealtimeResponse + { + /// + /// The unique ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("id")] + public string? Id { get; set; } + + /// + /// The object type, must be `realtime.response`. + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.RealtimeResponseObject? Object { get; set; } + + /// + /// The final status of the response (`completed`, `cancelled`, `failed`, or
+ /// `incomplete`). + ///
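For orientation only, and not part of the generated snapshot files: the ToJson/FromJson helpers above are thin wrappers over Newtonsoft.Json's JsonConvert, so a minimal round-trip might look like the sketch below (the payload and the resp_001 id are made up).

using System;

// Illustrative payload; "id" matches the [JsonProperty("id")] name on the generated model.
var json = "{\"id\": \"resp_001\"}";

// Deserialize with the generated static helper, then serialize back with ToJson().
var response = G.RealtimeResponse.FromJson(json);
Console.WriteLine(response?.Id);       // resp_001
Console.WriteLine(response?.ToJson()); // round-tripped JSON via JsonConvert.SerializeObject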
+ [global::Newtonsoft.Json.JsonProperty("status")] + public global::G.RealtimeResponseStatus? Status { get; set; } + + /// + /// Additional details about the status. + /// + [global::Newtonsoft.Json.JsonProperty("status_details")] + public global::G.RealtimeResponseStatusDetails? StatusDetails { get; set; } + + /// + /// The list of output items generated by the response. + /// + [global::Newtonsoft.Json.JsonProperty("output")] + public global::System.Collections.Generic.IList? Output { get; set; } + + /// + /// Usage statistics for the Response, this will correspond to billing. A
+ /// Realtime API session will maintain a conversation context and append new
+ /// Items to the Conversation, thus output from previous turns (text and
+ /// audio tokens) will become the input for later turns. + ///
+ [global::Newtonsoft.Json.JsonProperty("usage")] + public global::G.RealtimeResponseUsage? Usage { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the response. + /// + /// + /// The object type, must be `realtime.response`. + /// + /// + /// The final status of the response (`completed`, `cancelled`, `failed`, or
+ /// `incomplete`). + /// + /// Additional details about the status. + /// + /// The list of output items generated by the response. + /// + /// Usage statistics for the Response, this will correspond to billing. A
+ /// Realtime API session will maintain a conversation context and append new
+ /// Items to the Conversation, thus output from previous turns (text and
+ /// audio tokens) will become the input for later turns. + /// + public RealtimeResponse( + string? id, + global::G.RealtimeResponseObject? @object, + global::G.RealtimeResponseStatus? status, + global::G.RealtimeResponseStatusDetails? statusDetails, + global::System.Collections.Generic.IList? output, + global::G.RealtimeResponseUsage? usage) + { + this.Id = id; + this.Object = @object; + this.Status = status; + this.StatusDetails = statusDetails; + this.Output = output; + this.Usage = usage; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponse() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseObject.g.verified.cs new file mode 100644 index 0000000000..3662684e24 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeResponseObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// The object type, must be `realtime.response`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeResponseObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="realtime.response")] + RealtimeResponse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseObject value) + { + return value switch + { + RealtimeResponseObject.RealtimeResponse => "realtime.response", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseObject? ToEnum(string value) + { + return value switch + { + "realtime.response" => RealtimeResponseObject.RealtimeResponse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatus.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatus.g.verified.cs new file mode 100644 index 0000000000..85120fdd03 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatus.g.verified.cs @@ -0,0 +1,70 @@ +//HintName: G.Models.RealtimeResponseStatus.g.cs + +#nullable enable + +namespace G +{ + /// + /// The final status of the response (`completed`, `cancelled`, `failed`, or
+ /// `incomplete`). + ///
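A small, non-authoritative sketch of the ToValueString/ToEnum extension pair generated for this enum just below; the unknown value "paused" is only there to show the null fallback.

using System;
using G;

// Wire value -> enum; unknown strings map to null instead of throwing.
RealtimeResponseStatus? status = RealtimeResponseStatusExtensions.ToEnum("cancelled");
Console.WriteLine(status);                                                    // Cancelled
Console.WriteLine(RealtimeResponseStatusExtensions.ToEnum("paused") is null); // True

// Enum -> wire value through the generated extension method.
if (status is { } known)
{
    Console.WriteLine(known.ToValueString());                                 // cancelled
}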
+ [global::System.Runtime.Serialization.DataContract] + public enum RealtimeResponseStatus + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="completed")] + Completed, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="cancelled")] + Cancelled, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="failed")] + Failed, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="incomplete")] + Incomplete, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseStatusExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseStatus value) + { + return value switch + { + RealtimeResponseStatus.Completed => "completed", + RealtimeResponseStatus.Cancelled => "cancelled", + RealtimeResponseStatus.Failed => "failed", + RealtimeResponseStatus.Incomplete => "incomplete", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseStatus? ToEnum(string value) + { + return value switch + { + "completed" => RealtimeResponseStatus.Completed, + "cancelled" => RealtimeResponseStatus.Cancelled, + "failed" => RealtimeResponseStatus.Failed, + "incomplete" => RealtimeResponseStatus.Incomplete, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetails.Json.g.verified.cs new file mode 100644 index 0000000000..6395f25777 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetails.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeResponseStatusDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseStatusDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseStatusDetails? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetails.g.verified.cs new file mode 100644 index 0000000000..56e52f4c16 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetails.g.verified.cs @@ -0,0 +1,77 @@ +//HintName: G.Models.RealtimeResponseStatusDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Additional details about the status. + /// + public sealed partial class RealtimeResponseStatusDetails + { + /// + /// The type of error that caused the response to fail, corresponding
+ /// with the `status` field (`cancelled`, `incomplete`, `failed`). + ///
+ [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeResponseStatusDetailsType? Type { get; set; } + + /// + /// The reason the Response did not complete. For a `cancelled` Response,
+ /// one of `turn_detected` (the server VAD detected a new start of speech)
+ /// or `client_cancelled` (the client sent a cancel event). For an
+ /// `incomplete` Response, one of `max_output_tokens` or `content_filter`
+ /// (the server-side safety filter activated and cut off the response). + ///
+ [global::Newtonsoft.Json.JsonProperty("reason")] + public global::G.RealtimeResponseStatusDetailsReason? Reason { get; set; } + + /// + /// A description of the error that caused the response to fail,
+ /// populated when the `status` is `failed`. + ///
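As a hedged illustration (not part of the snapshot), the constructor generated for this class further below can describe a response that stopped at the output-token limit; the values are invented.

using G;

// Status details for a response that ended early at max_output_tokens.
var details = new RealtimeResponseStatusDetails(
    type: RealtimeResponseStatusDetailsType.Incomplete,
    reason: RealtimeResponseStatusDetailsReason.MaxOutputTokens,
    error: null);

System.Console.WriteLine(details.Reason); // MaxOutputTokens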
+ [global::Newtonsoft.Json.JsonProperty("error")] + public global::G.RealtimeResponseStatusDetailsError? Error { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of error that caused the response to fail, corresponding
+ /// with the `status` field (`cancelled`, `incomplete`, `failed`). + /// + /// The reason the Response did not complete. For a `cancelled` Response,
+ /// one of `turn_detected` (the server VAD detected a new start of speech)
+ /// or `client_cancelled` (the client sent a cancel event). For an
+ /// `incomplete` Response, one of `max_output_tokens` or `content_filter`
+ /// (the server-side safety filter activated and cut off the response). + /// + /// A description of the error that caused the response to fail,
+ /// populated when the `status` is `failed`. + /// + public RealtimeResponseStatusDetails( + global::G.RealtimeResponseStatusDetailsType? type, + global::G.RealtimeResponseStatusDetailsReason? reason, + global::G.RealtimeResponseStatusDetailsError? error) + { + this.Type = type; + this.Reason = reason; + this.Error = error; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseStatusDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsError.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsError.Json.g.verified.cs new file mode 100644 index 0000000000..eba02c8e13 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsError.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeResponseStatusDetailsError.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseStatusDetailsError + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseStatusDetailsError? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsError.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsError.g.verified.cs new file mode 100644 index 0000000000..1bba410e5a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsError.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.RealtimeResponseStatusDetailsError.g.cs + +#nullable enable + +namespace G +{ + /// + /// A description of the error that caused the response to fail,
+ /// populated when the `status` is `failed`. + ///
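Every model in this snapshot also gets a FromJsonStreamAsync helper (this type's version appears in the Json partial above). A minimal sketch, assuming the helper yields the deserialized model (its generic return type was lost in this rendering of the diff); the type and code values are made up.

using System.IO;
using System.Text;
using G;

// Hypothetical error payload; "type" and "code" are the JsonProperty names declared below.
var payload = "{\"type\": \"server_error\", \"code\": \"internal_error\"}";
using var stream = new MemoryStream(Encoding.UTF8.GetBytes(payload));

var error = await RealtimeResponseStatusDetailsError.FromJsonStreamAsync(stream);
System.Console.WriteLine($"{error?.Type} / {error?.Code}"); // server_error / internal_error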
+ public sealed partial class RealtimeResponseStatusDetailsError + { + /// + /// The type of error. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public string? Type { get; set; } + + /// + /// Error code, if any. + /// + [global::Newtonsoft.Json.JsonProperty("code")] + public string? Code { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of error. + /// + /// + /// Error code, if any. + /// + public RealtimeResponseStatusDetailsError( + string? type, + string? code) + { + this.Type = type; + this.Code = code; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseStatusDetailsError() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsReason.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsReason.g.verified.cs new file mode 100644 index 0000000000..eae4e4bf98 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsReason.g.verified.cs @@ -0,0 +1,73 @@ +//HintName: G.Models.RealtimeResponseStatusDetailsReason.g.cs + +#nullable enable + +namespace G +{ + /// + /// The reason the Response did not complete. For a `cancelled` Response,
+ /// one of `turn_detected` (the server VAD detected a new start of speech)
+ /// or `client_cancelled` (the client sent a cancel event). For an
+ /// `incomplete` Response, one of `max_output_tokens` or `content_filter`
+ /// (the server-side safety filter activated and cut off the response). + ///
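A short illustrative sketch, not part of the snapshot, mapping the termination reasons documented above (and declared just below) to log-friendly text.

using G;

// Map each documented termination reason to a log-friendly description.
static string Describe(RealtimeResponseStatusDetailsReason reason) => reason switch
{
    RealtimeResponseStatusDetailsReason.TurnDetected    => "server VAD detected a new start of speech",
    RealtimeResponseStatusDetailsReason.ClientCancelled => "the client sent a cancel event",
    RealtimeResponseStatusDetailsReason.MaxOutputTokens => "the max_output_tokens limit was reached",
    RealtimeResponseStatusDetailsReason.ContentFilter   => "the server-side safety filter cut off the response",
    _ => "unknown reason",
};

System.Console.WriteLine(Describe(RealtimeResponseStatusDetailsReason.ContentFilter));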
+ [global::System.Runtime.Serialization.DataContract] + public enum RealtimeResponseStatusDetailsReason + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="turn_detected")] + TurnDetected, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="client_cancelled")] + ClientCancelled, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="max_output_tokens")] + MaxOutputTokens, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="content_filter")] + ContentFilter, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseStatusDetailsReasonExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseStatusDetailsReason value) + { + return value switch + { + RealtimeResponseStatusDetailsReason.TurnDetected => "turn_detected", + RealtimeResponseStatusDetailsReason.ClientCancelled => "client_cancelled", + RealtimeResponseStatusDetailsReason.MaxOutputTokens => "max_output_tokens", + RealtimeResponseStatusDetailsReason.ContentFilter => "content_filter", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseStatusDetailsReason? ToEnum(string value) + { + return value switch + { + "turn_detected" => RealtimeResponseStatusDetailsReason.TurnDetected, + "client_cancelled" => RealtimeResponseStatusDetailsReason.ClientCancelled, + "max_output_tokens" => RealtimeResponseStatusDetailsReason.MaxOutputTokens, + "content_filter" => RealtimeResponseStatusDetailsReason.ContentFilter, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsType.g.verified.cs new file mode 100644 index 0000000000..ae65996421 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseStatusDetailsType.g.verified.cs @@ -0,0 +1,70 @@ +//HintName: G.Models.RealtimeResponseStatusDetailsType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of error that caused the response to fail, corresponding
+ /// with the `status` field (`cancelled`, `incomplete`, `failed`). + ///
+ [global::System.Runtime.Serialization.DataContract] + public enum RealtimeResponseStatusDetailsType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="completed")] + Completed, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="cancelled")] + Cancelled, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="failed")] + Failed, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="incomplete")] + Incomplete, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseStatusDetailsTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseStatusDetailsType value) + { + return value switch + { + RealtimeResponseStatusDetailsType.Completed => "completed", + RealtimeResponseStatusDetailsType.Cancelled => "cancelled", + RealtimeResponseStatusDetailsType.Failed => "failed", + RealtimeResponseStatusDetailsType.Incomplete => "incomplete", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseStatusDetailsType? ToEnum(string value) + { + return value switch + { + "completed" => RealtimeResponseStatusDetailsType.Completed, + "cancelled" => RealtimeResponseStatusDetailsType.Cancelled, + "failed" => RealtimeResponseStatusDetailsType.Failed, + "incomplete" => RealtimeResponseStatusDetailsType.Incomplete, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsage.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsage.Json.g.verified.cs new file mode 100644 index 0000000000..5dc2063e65 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsage.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeResponseUsage.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseUsage + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseUsage? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsage.g.verified.cs new file mode 100644 index 0000000000..e6365ec17f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsage.g.verified.cs @@ -0,0 +1,96 @@ +//HintName: G.Models.RealtimeResponseUsage.g.cs + +#nullable enable + +namespace G +{ + /// + /// Usage statistics for the Response, this will correspond to billing. A
+ /// Realtime API session will maintain a conversation context and append new
+ /// Items to the Conversation, thus output from previous turns (text and
+ /// audio tokens) will become the input for later turns. + ///
+ public sealed partial class RealtimeResponseUsage + { + /// + /// The total number of tokens in the Response including input and output
+ /// text and audio tokens. + ///
+ [global::Newtonsoft.Json.JsonProperty("total_tokens")] + public int? TotalTokens { get; set; } + + /// + /// The number of input tokens used in the Response, including text and
+ /// audio tokens. + ///
+ [global::Newtonsoft.Json.JsonProperty("input_tokens")] + public int? InputTokens { get; set; } + + /// + /// The number of output tokens sent in the Response, including text and
+ /// audio tokens. + ///
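An illustrative sketch, not part of the snapshot, that assembles a usage record with the generated constructors (the token-detail types appear further below); the token counts are invented but kept consistent (input + output = total).

using G;

// 100 input tokens (80 text + 20 audio, none cached) and 50 output tokens (30 text + 20 audio).
var usage = new RealtimeResponseUsage(
    totalTokens: 150,
    inputTokens: 100,
    outputTokens: 50,
    inputTokenDetails: new RealtimeResponseUsageInputTokenDetails(
        cachedTokens: 0, textTokens: 80, audioTokens: 20),
    outputTokenDetails: new RealtimeResponseUsageOutputTokenDetails(
        textTokens: 30, audioTokens: 20));

System.Console.WriteLine(usage.TotalTokens); // 150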
+ [global::Newtonsoft.Json.JsonProperty("output_tokens")] + public int? OutputTokens { get; set; } + + /// + /// Details about the input tokens used in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("input_token_details")] + public global::G.RealtimeResponseUsageInputTokenDetails? InputTokenDetails { get; set; } + + /// + /// Details about the output tokens used in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("output_token_details")] + public global::G.RealtimeResponseUsageOutputTokenDetails? OutputTokenDetails { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The total number of tokens in the Response including input and output
+ /// text and audio tokens. + /// + /// The number of input tokens used in the Response, including text and
+ /// audio tokens. + /// + /// The number of output tokens sent in the Response, including text and
+ /// audio tokens. + /// + /// + /// Details about the input tokens used in the Response. + /// + /// + /// Details about the output tokens used in the Response. + /// + public RealtimeResponseUsage( + int? totalTokens, + int? inputTokens, + int? outputTokens, + global::G.RealtimeResponseUsageInputTokenDetails? inputTokenDetails, + global::G.RealtimeResponseUsageOutputTokenDetails? outputTokenDetails) + { + this.TotalTokens = totalTokens; + this.InputTokens = inputTokens; + this.OutputTokens = outputTokens; + this.InputTokenDetails = inputTokenDetails; + this.OutputTokenDetails = outputTokenDetails; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseUsage() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.Json.g.verified.cs new file mode 100644 index 0000000000..5e4aca0f76 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeResponseUsageInputTokenDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseUsageInputTokenDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseUsageInputTokenDetails? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.g.verified.cs new file mode 100644 index 0000000000..ca006e4b93 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.g.verified.cs @@ -0,0 +1,65 @@ +//HintName: G.Models.RealtimeResponseUsageInputTokenDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Details about the input tokens used in the Response. + /// + public sealed partial class RealtimeResponseUsageInputTokenDetails + { + /// + /// The number of cached tokens used in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("cached_tokens")] + public int? CachedTokens { get; set; } + + /// + /// The number of text tokens used in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("text_tokens")] + public int? TextTokens { get; set; } + + /// + /// The number of audio tokens used in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("audio_tokens")] + public int? AudioTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The number of cached tokens used in the Response. + /// + /// + /// The number of text tokens used in the Response. + /// + /// + /// The number of audio tokens used in the Response. + /// + public RealtimeResponseUsageInputTokenDetails( + int? cachedTokens, + int? textTokens, + int? audioTokens) + { + this.CachedTokens = cachedTokens; + this.TextTokens = textTokens; + this.AudioTokens = audioTokens; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeResponseUsageInputTokenDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.Json.g.verified.cs new file mode 100644 index 0000000000..f68bc804b8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeResponseUsageOutputTokenDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseUsageOutputTokenDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseUsageOutputTokenDetails? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.g.verified.cs new file mode 100644 index 0000000000..b328e98d7b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.RealtimeResponseUsageOutputTokenDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Details about the output tokens used in the Response. + /// + public sealed partial class RealtimeResponseUsageOutputTokenDetails + { + /// + /// The number of text tokens used in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("text_tokens")] + public int? TextTokens { get; set; } + + /// + /// The number of audio tokens used in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("audio_tokens")] + public int? AudioTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The number of text tokens used in the Response. + /// + /// + /// The number of audio tokens used in the Response. + /// + public RealtimeResponseUsageOutputTokenDetails( + int? textTokens, + int? audioTokens) + { + this.TextTokens = textTokens; + this.AudioTokens = audioTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseUsageOutputTokenDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreated.Json.g.verified.cs new file mode 100644 index 0000000000..1c06350b32 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreated.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventConversationCreated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationCreated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationCreated? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreated.g.verified.cs new file mode 100644 index 0000000000..3232b55027 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreated.g.verified.cs @@ -0,0 +1,65 @@ +//HintName: G.Models.RealtimeServerEventConversationCreated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a conversation is created. Emitted right after session creation. + /// + public sealed partial class RealtimeServerEventConversationCreated + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `conversation.created`. 
+ /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventConversationCreatedType Type { get; set; } + + /// + /// The conversation resource. + /// + [global::Newtonsoft.Json.JsonProperty("conversation", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeServerEventConversationCreatedConversation Conversation { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `conversation.created`. + /// + /// + /// The conversation resource. + /// + public RealtimeServerEventConversationCreated( + string eventId, + global::G.RealtimeServerEventConversationCreatedConversation conversation, + global::G.RealtimeServerEventConversationCreatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Conversation = conversation ?? throw new global::System.ArgumentNullException(nameof(conversation)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationCreated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.Json.g.verified.cs new file mode 100644 index 0000000000..9254ee92a8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventConversationCreatedConversation.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationCreatedConversation + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationCreatedConversation? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.g.verified.cs new file mode 100644 index 0000000000..1ac5d3b405 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.RealtimeServerEventConversationCreatedConversation.g.cs + +#nullable enable + +namespace G +{ + /// + /// The conversation resource. + /// + public sealed partial class RealtimeServerEventConversationCreatedConversation + { + /// + /// The unique ID of the conversation. + /// + [global::Newtonsoft.Json.JsonProperty("id")] + public string? Id { get; set; } + + /// + /// The object type, must be `realtime.conversation`. + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public string? Object { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the conversation. + /// + /// + /// The object type, must be `realtime.conversation`. + /// + public RealtimeServerEventConversationCreatedConversation( + string? id, + string? 
@object) + { + this.Id = id; + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationCreatedConversation() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedType.g.verified.cs new file mode 100644 index 0000000000..68b1aa50be --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationCreatedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventConversationCreatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.created`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventConversationCreatedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.created")] + ConversationCreated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationCreatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationCreatedType value) + { + return value switch + { + RealtimeServerEventConversationCreatedType.ConversationCreated => "conversation.created", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationCreatedType? ToEnum(string value) + { + return value switch + { + "conversation.created" => RealtimeServerEventConversationCreatedType.ConversationCreated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreated.Json.g.verified.cs new file mode 100644 index 0000000000..16ee7f9edf --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreated.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventConversationItemCreated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemCreated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemCreated? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreated.g.verified.cs new file mode 100644 index 0000000000..42b02ec458 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreated.g.verified.cs @@ -0,0 +1,87 @@ +//HintName: G.Models.RealtimeServerEventConversationItemCreated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a conversation item is created. There are several scenarios that
+ /// produce this event:
+ /// - The server is generating a Response, which, if successful, will produce
+ /// either one or two Items, which will be of type `message`
+ /// (role `assistant`) or type `function_call`.
+ /// - The input audio buffer has been committed, either by the client or the
+ /// server (in `server_vad` mode). The server will take the content of the
+ /// input audio buffer and add it to a new user message Item.
+ /// - The client has sent a `conversation.item.create` event to add a new Item
+ /// to the Conversation. + ///
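As a hand-written illustration (not part of the generated snapshot), a client receiving this event over the Realtime WebSocket might hand the raw JSON text to the generated Newtonsoft helper shown later in this diff; the handler name and the `json` parameter below are assumptions for the sketch.

// Hypothetical handler; `json` holds the text of one `conversation.item.created` server event.
static void HandleConversationItemCreated(string json)
{
    // FromJson is the generated Newtonsoft-based helper; it returns null if nothing deserializes.
    var created = G.RealtimeServerEventConversationItemCreated.FromJson(json);
    if (created == null) return;

    // previous_item_id tells the client where the new item sits in the conversation.
    System.Console.WriteLine($"event {created.EventId}: new item placed after {created.PreviousItemId}");
}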
+ public sealed partial class RealtimeServerEventConversationItemCreated + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `conversation.item.created`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventConversationItemCreatedType Type { get; set; } + + /// + /// The ID of the preceding item in the Conversation context, allows the
+ /// client to understand the order of the conversation. + ///
+ [global::Newtonsoft.Json.JsonProperty("previous_item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string PreviousItemId { get; set; } = default!; + + /// + /// The item to add to the conversation. + /// + [global::Newtonsoft.Json.JsonProperty("item", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeConversationItem Item { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `conversation.item.created`. + /// + /// + /// The ID of the preceding item in the Conversation context, allows the
+ /// client to understand the order of the conversation. + /// + /// + /// The item to add to the conversation. + /// + public RealtimeServerEventConversationItemCreated( + string eventId, + string previousItemId, + global::G.RealtimeConversationItem item, + global::G.RealtimeServerEventConversationItemCreatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.PreviousItemId = previousItemId ?? throw new global::System.ArgumentNullException(nameof(previousItemId)); + this.Item = item ?? throw new global::System.ArgumentNullException(nameof(item)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemCreated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreatedType.g.verified.cs new file mode 100644 index 0000000000..40a5014d4e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemCreatedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventConversationItemCreatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.created`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventConversationItemCreatedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.item.created")] + ConversationItemCreated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemCreatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemCreatedType value) + { + return value switch + { + RealtimeServerEventConversationItemCreatedType.ConversationItemCreated => "conversation.item.created", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemCreatedType? ToEnum(string value) + { + return value switch + { + "conversation.item.created" => RealtimeServerEventConversationItemCreatedType.ConversationItemCreated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeleted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeleted.Json.g.verified.cs new file mode 100644 index 0000000000..9180384146 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeleted.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventConversationItemDeleted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemDeleted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemDeleted? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeleted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeleted.g.verified.cs new file mode 100644 index 0000000000..03a3576c00 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeleted.g.verified.cs @@ -0,0 +1,67 @@ +//HintName: G.Models.RealtimeServerEventConversationItemDeleted.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an item in the conversation is deleted by the client with a
+ /// `conversation.item.delete` event. This event is used to synchronize the
+ /// server's understanding of the conversation history with the client's view. + ///
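As a rough sketch (hand-written, not generator output), the generated constructor and the Newtonsoft `ToJson`/`FromJson` helpers can round-trip this event; the IDs below are made-up example values.

// Build the event with the generated constructor, then round-trip it through JSON.
var deleted = new G.RealtimeServerEventConversationItemDeleted(
    eventId: "event_2728",   // example value
    itemId: "msg_005",       // example value
    type: G.RealtimeServerEventConversationItemDeletedType.ConversationItemDeleted);

string json = deleted.ToJson();
var parsed = G.RealtimeServerEventConversationItemDeleted.FromJson(json);
System.Console.WriteLine(parsed?.ItemId); // prints "msg_005"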
+ public sealed partial class RealtimeServerEventConversationItemDeleted + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `conversation.item.deleted`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventConversationItemDeletedType Type { get; set; } + + /// + /// The ID of the item that was deleted. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `conversation.item.deleted`. + /// + /// + /// The ID of the item that was deleted. + /// + public RealtimeServerEventConversationItemDeleted( + string eventId, + string itemId, + global::G.RealtimeServerEventConversationItemDeletedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemDeleted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeletedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeletedType.g.verified.cs new file mode 100644 index 0000000000..32af191d71 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemDeletedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventConversationItemDeletedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.deleted`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventConversationItemDeletedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.item.deleted")] + ConversationItemDeleted, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemDeletedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemDeletedType value) + { + return value switch + { + RealtimeServerEventConversationItemDeletedType.ConversationItemDeleted => "conversation.item.deleted", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemDeletedType? 
ToEnum(string value) + { + return value switch + { + "conversation.item.deleted" => RealtimeServerEventConversationItemDeletedType.ConversationItemDeleted, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.Json.g.verified.cs new file mode 100644 index 0000000000..88b74f2062 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.verified.cs new file mode 100644 index 0000000000..1de9e82357 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.verified.cs @@ -0,0 +1,97 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs + +#nullable enable + +namespace G +{ + /// + /// This event is the output of audio transcription for user audio written to the
+ /// user audio buffer. Transcription begins when the input audio buffer is
+ /// committed by the client or server (in `server_vad` mode). Transcription runs
+ /// asynchronously with Response creation, so this event may come before or after
+ /// the Response events.
+ /// Realtime API models accept audio natively, and thus input transcription is a
+ /// separate process run on a separate ASR (Automatic Speech Recognition) model,
+ /// currently always `whisper-1`. As a result, the transcript may diverge somewhat from
+ /// the model's interpretation, and should be treated as a rough guide. + ///
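A minimal hand-written sketch (not generator output) of consuming this event from a stream inside an async method; the payload literal is an assumed example shaped after the required properties documented above.

// Assumed example payload; event_id, item_id, content_index and transcript are the required fields.
var json = @"{""event_id"":""event_2122"",""item_id"":""msg_003"",""content_index"":0,""transcript"":""Hello, how are you?""}";
using var stream = new System.IO.MemoryStream(System.Text.Encoding.UTF8.GetBytes(json));

// The generated stream helper wraps a Newtonsoft JsonTextReader over the stream.
var completed = await G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted
    .FromJsonStreamAsync(stream);
System.Console.WriteLine(completed?.Transcript); // prints "Hello, how are you?"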
+ public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.completed`. + ///
+ [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType Type { get; set; } + + /// + /// The ID of the user message item containing the audio. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the content part containing the audio. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// The transcribed text. + /// + [global::Newtonsoft.Json.JsonProperty("transcript", Required = global::Newtonsoft.Json.Required.Always)] + public string Transcript { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.completed`. + /// + /// + /// The ID of the user message item containing the audio. + /// + /// + /// The index of the content part containing the audio. + /// + /// + /// The transcribed text. + /// + public RealtimeServerEventConversationItemInputAudioTranscriptionCompleted( + string eventId, + string itemId, + int contentIndex, + string transcript, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.ContentIndex = contentIndex; + this.Transcript = transcript ?? throw new global::System.ArgumentNullException(nameof(transcript)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemInputAudioTranscriptionCompleted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs new file mode 100644 index 0000000000..6a6d2e6159 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs @@ -0,0 +1,49 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.completed`. + ///
+ [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.item.input_audio_transcription.completed")] + ConversationItemInputAudioTranscriptionCompleted, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType value) + { + return value switch + { + RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.ConversationItemInputAudioTranscriptionCompleted => "conversation.item.input_audio_transcription.completed", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType? ToEnum(string value) + { + return value switch + { + "conversation.item.input_audio_transcription.completed" => RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.ConversationItemInputAudioTranscriptionCompleted, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.Json.g.verified.cs new file mode 100644 index 0000000000..46e7888a5a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailed + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.verified.cs new file mode 100644 index 0000000000..3ca6532962 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.verified.cs @@ -0,0 +1,91 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when input audio transcription is configured, and a transcription
+ /// request for a user message failed. These events are separate from other
+ /// `error` events so that the client can identify the related Item. + ///
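As a hand-written sketch (not generator output), a client might surface the failure details like this; the payload, error code, and message are assumed example values.

// Assumed example payload for a failed transcription of item "msg_003".
var json = @"{""event_id"":""event_2324"",""item_id"":""msg_003"",""content_index"":0,
              ""error"":{""type"":""transcription_error"",""code"":""audio_unintelligible"",
                         ""message"":""The audio could not be transcribed."",""param"":null}}";

var failed = G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.FromJson(json);
if (failed != null)
{
    // The nested error object carries type/code/message/param, mirroring other API errors.
    System.Console.WriteLine($"{failed.ItemId}: {failed.Error.Code} - {failed.Error.Message}");
}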
+ public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailed + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.failed`. + ///
+ [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType Type { get; set; } + + /// + /// The ID of the user message item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the content part containing the audio. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// Details of the transcription error. + /// + [global::Newtonsoft.Json.JsonProperty("error", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError Error { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.failed`. + /// + /// + /// The ID of the user message item. + /// + /// + /// The index of the content part containing the audio. + /// + /// + /// Details of the transcription error. + /// + public RealtimeServerEventConversationItemInputAudioTranscriptionFailed( + string eventId, + string itemId, + int contentIndex, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError error, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.ContentIndex = contentIndex; + this.Error = error ?? throw new global::System.ArgumentNullException(nameof(error)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemInputAudioTranscriptionFailed() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.Json.g.verified.cs new file mode 100644 index 0000000000..ecbb83a0d6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailedError + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.verified.cs new file mode 100644 index 0000000000..39f9d12d73 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.cs + +#nullable enable + +namespace G +{ + /// + /// Details of the transcription error. + /// + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailedError + { + /// + /// The type of error. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public string? Type { get; set; } + + /// + /// Error code, if any. + /// + [global::Newtonsoft.Json.JsonProperty("code")] + public string? Code { get; set; } + + /// + /// A human-readable error message. + /// + [global::Newtonsoft.Json.JsonProperty("message")] + public string? Message { get; set; } + + /// + /// Parameter related to the error, if any. + /// + [global::Newtonsoft.Json.JsonProperty("param")] + public string? Param { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of error. + /// + /// + /// Error code, if any. + /// + /// + /// A human-readable error message. + /// + /// + /// Parameter related to the error, if any. 
+ /// + public RealtimeServerEventConversationItemInputAudioTranscriptionFailedError( + string? type, + string? code, + string? message, + string? param) + { + this.Type = type; + this.Code = code; + this.Message = message; + this.Param = param; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemInputAudioTranscriptionFailedError() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs new file mode 100644 index 0000000000..bcf38983eb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs @@ -0,0 +1,49 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.failed`. + ///
+ [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventConversationItemInputAudioTranscriptionFailedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.item.input_audio_transcription.failed")] + ConversationItemInputAudioTranscriptionFailed, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemInputAudioTranscriptionFailedType value) + { + return value switch + { + RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.ConversationItemInputAudioTranscriptionFailed => "conversation.item.input_audio_transcription.failed", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemInputAudioTranscriptionFailedType? ToEnum(string value) + { + return value switch + { + "conversation.item.input_audio_transcription.failed" => RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.ConversationItemInputAudioTranscriptionFailed, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncated.Json.g.verified.cs new file mode 100644 index 0000000000..405e2f9d3f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncated.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventConversationItemTruncated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemTruncated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemTruncated? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncated.g.verified.cs new file mode 100644 index 0000000000..52bfded54f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncated.g.verified.cs @@ -0,0 +1,91 @@ +//HintName: G.Models.RealtimeServerEventConversationItemTruncated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an earlier assistant audio message item is truncated by the
+ /// client with a `conversation.item.truncate` event. This event is used to
+ /// synchronize the server's understanding of the audio with the client's playback.
+ /// This action will truncate the audio and remove the server-side text transcript
+ /// to ensure there is no text in the context that hasn't been heard by the user. + ///
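A short hand-written sketch (not generator output) showing the generated constructor and serializer for this event; the IDs and the 1500 ms cut-off are made-up example values.

// Construct the truncation event and serialize it with the generated Newtonsoft helper.
var truncated = new G.RealtimeServerEventConversationItemTruncated(
    eventId: "event_2526",   // example value
    itemId: "msg_004",       // example value
    contentIndex: 0,
    audioEndMs: 1500,        // audio is kept only up to 1.5 seconds
    type: G.RealtimeServerEventConversationItemTruncatedType.ConversationItemTruncated);

System.Console.WriteLine(truncated.ToJson());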
+ public sealed partial class RealtimeServerEventConversationItemTruncated + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `conversation.item.truncated`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventConversationItemTruncatedType Type { get; set; } + + /// + /// The ID of the assistant message item that was truncated. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the content part that was truncated. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// The duration up to which the audio was truncated, in milliseconds. + /// + [global::Newtonsoft.Json.JsonProperty("audio_end_ms", Required = global::Newtonsoft.Json.Required.Always)] + public int AudioEndMs { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `conversation.item.truncated`. + /// + /// + /// The ID of the assistant message item that was truncated. + /// + /// + /// The index of the content part that was truncated. + /// + /// + /// The duration up to which the audio was truncated, in milliseconds. + /// + public RealtimeServerEventConversationItemTruncated( + string eventId, + string itemId, + int contentIndex, + int audioEndMs, + global::G.RealtimeServerEventConversationItemTruncatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.ContentIndex = contentIndex; + this.AudioEndMs = audioEndMs; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemTruncated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncatedType.g.verified.cs new file mode 100644 index 0000000000..42df7d8399 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventConversationItemTruncatedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventConversationItemTruncatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.truncated`. 
+ /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventConversationItemTruncatedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="conversation.item.truncated")] + ConversationItemTruncated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemTruncatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemTruncatedType value) + { + return value switch + { + RealtimeServerEventConversationItemTruncatedType.ConversationItemTruncated => "conversation.item.truncated", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemTruncatedType? ToEnum(string value) + { + return value switch + { + "conversation.item.truncated" => RealtimeServerEventConversationItemTruncatedType.ConversationItemTruncated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventError.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventError.Json.g.verified.cs new file mode 100644 index 0000000000..48b62257e1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventError.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventError.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventError + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventError? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventError.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventError.g.verified.cs new file mode 100644 index 0000000000..f6f9bc19cb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventError.g.verified.cs @@ -0,0 +1,67 @@ +//HintName: G.Models.RealtimeServerEventError.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an error occurs, which could be a client problem or a server
+ /// problem. Most errors are recoverable and the session will stay open; we
+ /// recommend that implementors monitor and log error messages by default. + ///
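As a hand-written sketch (not generator output), the monitoring and logging the summary recommends might look like this; the payload and message text are assumed example values ("invalid_request_error" is one of the error types named in the nested error model later in this diff).

// Assumed example payload for an error caused by an earlier client event.
var json = @"{""event_id"":""event_890"",
              ""error"":{""type"":""invalid_request_error"",""code"":""invalid_event"",
                         ""message"":""The 'type' field is missing."",""param"":null,
                         ""event_id"":""event_567""}}";

var serverError = G.RealtimeServerEventError.FromJson(json);
if (serverError != null)
{
    // Keep the session open and log; Error.EventId points at the offending client event, if any.
    System.Console.WriteLine(
        $"[{serverError.Error.Type}] {serverError.Error.Message} (caused by {serverError.Error.EventId})");
}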
+ public sealed partial class RealtimeServerEventError + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `error`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventErrorType Type { get; set; } + + /// + /// Details of the error. + /// + [global::Newtonsoft.Json.JsonProperty("error", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeServerEventErrorError Error { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `error`. + /// + /// + /// Details of the error. + /// + public RealtimeServerEventError( + string eventId, + global::G.RealtimeServerEventErrorError error, + global::G.RealtimeServerEventErrorType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Error = error ?? throw new global::System.ArgumentNullException(nameof(error)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventError() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorError.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorError.Json.g.verified.cs new file mode 100644 index 0000000000..9b88c5988f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorError.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventErrorError.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventErrorError + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventErrorError? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorError.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorError.g.verified.cs new file mode 100644 index 0000000000..6d92f0c559 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorError.g.verified.cs @@ -0,0 +1,87 @@ +//HintName: G.Models.RealtimeServerEventErrorError.g.cs + +#nullable enable + +namespace G +{ + /// + /// Details of the error. + /// + public sealed partial class RealtimeServerEventErrorError + { + /// + /// The type of error (e.g., "invalid_request_error", "server_error"). + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public string? Type { get; set; } + + /// + /// Error code, if any. + /// + [global::Newtonsoft.Json.JsonProperty("code")] + public string? Code { get; set; } + + /// + /// A human-readable error message. + /// + [global::Newtonsoft.Json.JsonProperty("message")] + public string? Message { get; set; } + + /// + /// Parameter related to the error, if any. + /// + [global::Newtonsoft.Json.JsonProperty("param")] + public string? Param { get; set; } + + /// + /// The event_id of the client event that caused the error, if applicable. + /// + [global::Newtonsoft.Json.JsonProperty("event_id")] + public string? 
EventId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of error (e.g., "invalid_request_error", "server_error"). + /// + /// + /// Error code, if any. + /// + /// + /// A human-readable error message. + /// + /// + /// Parameter related to the error, if any. + /// + /// + /// The event_id of the client event that caused the error, if applicable. + /// + public RealtimeServerEventErrorError( + string? type, + string? code, + string? message, + string? param, + string? eventId) + { + this.Type = type; + this.Code = code; + this.Message = message; + this.Param = param; + this.EventId = eventId; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventErrorError() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorType.g.verified.cs new file mode 100644 index 0000000000..ad2b1df194 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventErrorType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventErrorType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `error`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventErrorType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="error")] + Error, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventErrorTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventErrorType value) + { + return value switch + { + RealtimeServerEventErrorType.Error => "error", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventErrorType? ToEnum(string value) + { + return value switch + { + "error" => RealtimeServerEventErrorType.Error, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.Json.g.verified.cs new file mode 100644 index 0000000000..91aec97efb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCleared.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventInputAudioBufferCleared + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventInputAudioBufferCleared? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.g.verified.cs new file mode 100644 index 0000000000..3c66f0a1b9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCleared.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the input audio buffer is cleared by the client with a
+ /// `input_audio_buffer.clear` event. + ///
+ public sealed partial class RealtimeServerEventInputAudioBufferCleared + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `input_audio_buffer.cleared`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventInputAudioBufferClearedType Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `input_audio_buffer.cleared`. + /// + public RealtimeServerEventInputAudioBufferCleared( + string eventId, + global::G.RealtimeServerEventInputAudioBufferClearedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventInputAudioBufferCleared() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs new file mode 100644 index 0000000000..cceca3ee12 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferClearedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.cleared`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventInputAudioBufferClearedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio_buffer.cleared")] + InputAudioBufferCleared, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventInputAudioBufferClearedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventInputAudioBufferClearedType value) + { + return value switch + { + RealtimeServerEventInputAudioBufferClearedType.InputAudioBufferCleared => "input_audio_buffer.cleared", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventInputAudioBufferClearedType? 
ToEnum(string value) + { + return value switch + { + "input_audio_buffer.cleared" => RealtimeServerEventInputAudioBufferClearedType.InputAudioBufferCleared, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.Json.g.verified.cs new file mode 100644 index 0000000000..423d64f051 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCommitted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventInputAudioBufferCommitted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventInputAudioBufferCommitted? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.g.verified.cs new file mode 100644 index 0000000000..ca4b14abef --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.g.verified.cs @@ -0,0 +1,79 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an input audio buffer is committed, either by the client or
+ /// automatically in server VAD mode. The `item_id` property is the ID of the user
+ /// message item that will be created; thus, a `conversation.item.created` event
+ /// will also be sent to the client. + ///
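As a rough illustration of the `item_id` relationship described above, a client might remember committed item IDs and match them against later `conversation.item.created` events. This is a hypothetical helper, not generated code:

internal static class CommittedItemTracker
{
    // Item IDs announced by `input_audio_buffer.committed` that still await their
    // matching `conversation.item.created` event.
    private static readonly global::System.Collections.Generic.HashSet<string> Pending = new();

    public static void OnCommitted(string json)
    {
        var evt = global::G.RealtimeServerEventInputAudioBufferCommitted.FromJson(json);
        if (evt != null)
        {
            Pending.Add(evt.ItemId);
        }
    }

    // Returns true (and clears the entry) when the created item was previously committed.
    public static bool OnItemCreated(string itemId) => Pending.Remove(itemId);
}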
+ public sealed partial class RealtimeServerEventInputAudioBufferCommitted + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `input_audio_buffer.committed`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventInputAudioBufferCommittedType Type { get; set; } + + /// + /// The ID of the preceding item after which the new item will be inserted. + /// + [global::Newtonsoft.Json.JsonProperty("previous_item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string PreviousItemId { get; set; } = default!; + + /// + /// The ID of the user message item that will be created. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `input_audio_buffer.committed`. + /// + /// + /// The ID of the preceding item after which the new item will be inserted. + /// + /// + /// The ID of the user message item that will be created. + /// + public RealtimeServerEventInputAudioBufferCommitted( + string eventId, + string previousItemId, + string itemId, + global::G.RealtimeServerEventInputAudioBufferCommittedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.PreviousItemId = previousItemId ?? throw new global::System.ArgumentNullException(nameof(previousItemId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventInputAudioBufferCommitted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs new file mode 100644 index 0000000000..4cfb7b562a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCommittedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.committed`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventInputAudioBufferCommittedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio_buffer.committed")] + InputAudioBufferCommitted, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventInputAudioBufferCommittedTypeExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this RealtimeServerEventInputAudioBufferCommittedType value) + { + return value switch + { + RealtimeServerEventInputAudioBufferCommittedType.InputAudioBufferCommitted => "input_audio_buffer.committed", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventInputAudioBufferCommittedType? ToEnum(string value) + { + return value switch + { + "input_audio_buffer.committed" => RealtimeServerEventInputAudioBufferCommittedType.InputAudioBufferCommitted, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.Json.g.verified.cs new file mode 100644 index 0000000000..d0a78b4f4a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventInputAudioBufferSpeechStarted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventInputAudioBufferSpeechStarted? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.verified.cs new file mode 100644 index 0000000000..94d4baa42d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.verified.cs @@ -0,0 +1,90 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs + +#nullable enable + +namespace G +{ + /// + /// Sent by the server when in `server_vad` mode to indicate that speech has been
+ /// detected in the audio buffer. This can happen any time audio is added to the
+ /// buffer (unless speech is already detected). The client may want to use this
+ /// event to interrupt audio playback or provide visual feedback to the user.
+ /// The client should expect to receive an `input_audio_buffer.speech_stopped` event
+ /// when speech stops. The `item_id` property is the ID of the user message item
+ /// that will be created when speech stops and will also be included in the
+ /// `input_audio_buffer.speech_stopped` event (unless the client manually commits
+ /// the audio buffer during VAD activation). + ///
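A sketch of the interruption pattern suggested above, assuming a caller-supplied `stopPlayback` callback (how playback is actually paused is application-specific; nothing here is generated code):

internal static class SpeechStartedHandler
{
    public static void Handle(string json, global::System.Action stopPlayback)
    {
        var evt = global::G.RealtimeServerEventInputAudioBufferSpeechStarted.FromJson(json);
        if (evt == null) return;

        // Stop local playback and surface feedback while the user is speaking.
        stopPlayback();
        global::System.Console.WriteLine($"speech started at {evt.AudioStartMs} ms; pending item {evt.ItemId}");
    }
}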
+ public sealed partial class RealtimeServerEventInputAudioBufferSpeechStarted + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `input_audio_buffer.speech_started`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventInputAudioBufferSpeechStartedType Type { get; set; } + + /// + /// Milliseconds from the start of all audio written to the buffer during the
+ /// session when speech was first detected. This will correspond to the
+ /// beginning of audio sent to the model, and thus includes the
+ /// `prefix_padding_ms` configured in the Session. + ///
+ [global::Newtonsoft.Json.JsonProperty("audio_start_ms", Required = global::Newtonsoft.Json.Required.Always)] + public int AudioStartMs { get; set; } = default!; + + /// + /// The ID of the user message item that will be created when speech stops. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `input_audio_buffer.speech_started`. + /// + /// + /// Milliseconds from the start of all audio written to the buffer during the
+ /// session when speech was first detected. This will correspond to the
+ /// beginning of audio sent to the model, and thus includes the
+ /// `prefix_padding_ms` configured in the Session. + /// + /// + /// The ID of the user message item that will be created when speech stops. + /// + public RealtimeServerEventInputAudioBufferSpeechStarted( + string eventId, + int audioStartMs, + string itemId, + global::G.RealtimeServerEventInputAudioBufferSpeechStartedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.AudioStartMs = audioStartMs; + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventInputAudioBufferSpeechStarted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs new file mode 100644 index 0000000000..59717b7db5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStartedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.speech_started`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventInputAudioBufferSpeechStartedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio_buffer.speech_started")] + InputAudioBufferSpeechStarted, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventInputAudioBufferSpeechStartedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventInputAudioBufferSpeechStartedType value) + { + return value switch + { + RealtimeServerEventInputAudioBufferSpeechStartedType.InputAudioBufferSpeechStarted => "input_audio_buffer.speech_started", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventInputAudioBufferSpeechStartedType? ToEnum(string value) + { + return value switch + { + "input_audio_buffer.speech_started" => RealtimeServerEventInputAudioBufferSpeechStartedType.InputAudioBufferSpeechStarted, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.Json.g.verified.cs new file mode 100644 index 0000000000..3231890996 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventInputAudioBufferSpeechStopped + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventInputAudioBufferSpeechStopped? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.verified.cs new file mode 100644 index 0000000000..1ab71b4305 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.verified.cs @@ -0,0 +1,82 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned in `server_vad` mode when the server detects the end of speech in
+ /// the audio buffer. The server will also send a `conversation.item.created`
+ /// event with the user message item that is created from the audio buffer. + ///
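A sketch that consumes this event from a stream using the generated `FromJsonStreamAsync` helper shown in the companion Json file above (the `SpeechStoppedHandler` name is illustrative):

internal static class SpeechStoppedHandler
{
    public static async global::System.Threading.Tasks.Task HandleAsync(global::System.IO.Stream payload)
    {
        var evt = await global::G.RealtimeServerEventInputAudioBufferSpeechStopped.FromJsonStreamAsync(payload);
        if (evt != null)
        {
            global::System.Console.WriteLine($"speech stopped at {evt.AudioEndMs} ms; expecting item {evt.ItemId}");
        }
    }
}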
+ public sealed partial class RealtimeServerEventInputAudioBufferSpeechStopped + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `input_audio_buffer.speech_stopped`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType Type { get; set; } + + /// + /// Milliseconds since the session started when speech stopped. This will
+ /// correspond to the end of audio sent to the model, and thus includes the
+ /// `min_silence_duration_ms` configured in the Session. + ///
+ [global::Newtonsoft.Json.JsonProperty("audio_end_ms", Required = global::Newtonsoft.Json.Required.Always)] + public int AudioEndMs { get; set; } = default!; + + /// + /// The ID of the user message item that will be created. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `input_audio_buffer.speech_stopped`. + /// + /// + /// Milliseconds since the session started when speech stopped. This will
+ /// correspond to the end of audio sent to the model, and thus includes the
+ /// `min_silence_duration_ms` configured in the Session. + /// + /// + /// The ID of the user message item that will be created. + /// + public RealtimeServerEventInputAudioBufferSpeechStopped( + string eventId, + int audioEndMs, + string itemId, + global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.AudioEndMs = audioEndMs; + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventInputAudioBufferSpeechStopped() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs new file mode 100644 index 0000000000..ea4e19b834 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.speech_stopped`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventInputAudioBufferSpeechStoppedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="input_audio_buffer.speech_stopped")] + InputAudioBufferSpeechStopped, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventInputAudioBufferSpeechStoppedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventInputAudioBufferSpeechStoppedType value) + { + return value switch + { + RealtimeServerEventInputAudioBufferSpeechStoppedType.InputAudioBufferSpeechStopped => "input_audio_buffer.speech_stopped", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventInputAudioBufferSpeechStoppedType? ToEnum(string value) + { + return value switch + { + "input_audio_buffer.speech_stopped" => RealtimeServerEventInputAudioBufferSpeechStoppedType.InputAudioBufferSpeechStopped, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.Json.g.verified.cs new file mode 100644 index 0000000000..2b8eef117a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventRateLimitsUpdated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventRateLimitsUpdated? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.g.verified.cs new file mode 100644 index 0000000000..2c02afc64e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.g.verified.cs @@ -0,0 +1,68 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Emitted at the beginning of a Response to indicate the updated rate limits.
+ /// When a Response is created, some tokens will be "reserved" for the output
+ /// tokens; the rate limits shown here reflect that reservation, which is then
+ /// adjusted accordingly once the Response is completed. + ///
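A sketch of how a client might surface the updated limits (illustrative `RateLimitLogger`; it assumes the `RealtimeServerEventRateLimitsUpdatedRateLimit` model defined later in this diff):

internal static class RateLimitLogger
{
    public static void Handle(string json)
    {
        var evt = global::G.RealtimeServerEventRateLimitsUpdated.FromJson(json);
        if (evt == null) return;

        foreach (var limit in evt.RateLimits)
        {
            // `name` is "requests" or "tokens"; `reset_seconds` is how long until the window resets.
            global::System.Console.WriteLine($"{limit.Name}: {limit.Remaining}/{limit.Limit} remaining, resets in {limit.ResetSeconds}s");
        }
    }
}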
+ public sealed partial class RealtimeServerEventRateLimitsUpdated + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `rate_limits.updated`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventRateLimitsUpdatedType Type { get; set; } + + /// + /// List of rate limit information. + /// + [global::Newtonsoft.Json.JsonProperty("rate_limits", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList RateLimits { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `rate_limits.updated`. + /// + /// + /// List of rate limit information. + /// + public RealtimeServerEventRateLimitsUpdated( + string eventId, + global::System.Collections.Generic.IList rateLimits, + global::G.RealtimeServerEventRateLimitsUpdatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.RateLimits = rateLimits ?? throw new global::System.ArgumentNullException(nameof(rateLimits)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventRateLimitsUpdated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.Json.g.verified.cs new file mode 100644 index 0000000000..59c25b097e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventRateLimitsUpdatedRateLimit + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventRateLimitsUpdatedRateLimit? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.verified.cs new file mode 100644 index 0000000000..fcecfde418 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class RealtimeServerEventRateLimitsUpdatedRateLimit + { + /// + /// The name of the rate limit (`requests`, `tokens`). + /// + [global::Newtonsoft.Json.JsonProperty("name")] + public string? Name { get; set; } + + /// + /// The maximum allowed value for the rate limit. + /// + [global::Newtonsoft.Json.JsonProperty("limit")] + public int? Limit { get; set; } + + /// + /// The remaining value before the limit is reached. + /// + [global::Newtonsoft.Json.JsonProperty("remaining")] + public int? Remaining { get; set; } + + /// + /// Seconds until the rate limit resets. + /// + [global::Newtonsoft.Json.JsonProperty("reset_seconds")] + public double? 
ResetSeconds { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The name of the rate limit (`requests`, `tokens`). + /// + /// + /// The maximum allowed value for the rate limit. + /// + /// + /// The remaining value before the limit is reached. + /// + /// + /// Seconds until the rate limit resets. + /// + public RealtimeServerEventRateLimitsUpdatedRateLimit( + string? name, + int? limit, + int? remaining, + double? resetSeconds) + { + this.Name = name; + this.Limit = limit; + this.Remaining = remaining; + this.ResetSeconds = resetSeconds; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventRateLimitsUpdatedRateLimit() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs new file mode 100644 index 0000000000..891808eabe --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `rate_limits.updated`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventRateLimitsUpdatedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="rate_limits.updated")] + RateLimitsUpdated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventRateLimitsUpdatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventRateLimitsUpdatedType value) + { + return value switch + { + RealtimeServerEventRateLimitsUpdatedType.RateLimitsUpdated => "rate_limits.updated", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventRateLimitsUpdatedType? ToEnum(string value) + { + return value switch + { + "rate_limits.updated" => RealtimeServerEventRateLimitsUpdatedType.RateLimitsUpdated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDelta.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDelta.Json.g.verified.cs new file mode 100644 index 0000000000..d6a1390c6c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDelta.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDelta.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseAudioDelta + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseAudioDelta? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDelta.g.verified.cs new file mode 100644 index 0000000000..1d1c6834f7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDelta.g.verified.cs @@ -0,0 +1,109 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDelta.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated audio is updated. + /// + public sealed partial class RealtimeServerEventResponseAudioDelta + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.audio.delta`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseAudioDeltaType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The index of the content part in the item's content array. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// Base64-encoded audio data delta. + /// + [global::Newtonsoft.Json.JsonProperty("delta", Required = global::Newtonsoft.Json.Required.Always)] + public string Delta { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.audio.delta`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// Base64-encoded audio data delta. 
+ /// + public RealtimeServerEventResponseAudioDelta( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string delta, + global::G.RealtimeServerEventResponseAudioDeltaType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Delta = delta ?? throw new global::System.ArgumentNullException(nameof(delta)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseAudioDelta() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDeltaType.g.verified.cs new file mode 100644 index 0000000000..240716475a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDeltaType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDeltaType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.audio.delta`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseAudioDeltaType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.audio.delta")] + ResponseAudioDelta, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseAudioDeltaTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseAudioDeltaType value) + { + return value switch + { + RealtimeServerEventResponseAudioDeltaType.ResponseAudioDelta => "response.audio.delta", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseAudioDeltaType? ToEnum(string value) + { + return value switch + { + "response.audio.delta" => RealtimeServerEventResponseAudioDeltaType.ResponseAudioDelta, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDone.Json.g.verified.cs new file mode 100644 index 0000000000..38b186ee26 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDone.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseAudioDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseAudioDone? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDone.g.verified.cs new file mode 100644 index 0000000000..19d4942ea4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDone.g.verified.cs @@ -0,0 +1,99 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated audio is done. Also emitted when a Response
+ /// is interrupted, incomplete, or cancelled. + /// </summary>
+ public sealed partial class RealtimeServerEventResponseAudioDone + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.audio.done`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseAudioDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The index of the content part in the item's content array. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.audio.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + public RealtimeServerEventResponseAudioDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + global::G.RealtimeServerEventResponseAudioDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseAudioDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDoneType.g.verified.cs new file mode 100644 index 0000000000..79c5dd11e2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioDoneType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.audio.done`. 
+ /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseAudioDoneType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.audio.done")] + ResponseAudioDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseAudioDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseAudioDoneType value) + { + return value switch + { + RealtimeServerEventResponseAudioDoneType.ResponseAudioDone => "response.audio.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseAudioDoneType? ToEnum(string value) + { + return value switch + { + "response.audio.done" => RealtimeServerEventResponseAudioDoneType.ResponseAudioDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.Json.g.verified.cs new file mode 100644 index 0000000000..e07e14e333 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDelta.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseAudioTranscriptDelta + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseAudioTranscriptDelta? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.verified.cs new file mode 100644 index 0000000000..cf025703d8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.verified.cs @@ -0,0 +1,109 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated transcription of audio output is updated. + /// + public sealed partial class RealtimeServerEventResponseAudioTranscriptDelta + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.audio_transcript.delta`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseAudioTranscriptDeltaType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The index of the content part in the item's content array. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// The transcript delta. 
+ /// + [global::Newtonsoft.Json.JsonProperty("delta", Required = global::Newtonsoft.Json.Required.Always)] + public string Delta { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.audio_transcript.delta`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The transcript delta. + /// + public RealtimeServerEventResponseAudioTranscriptDelta( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string delta, + global::G.RealtimeServerEventResponseAudioTranscriptDeltaType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Delta = delta ?? throw new global::System.ArgumentNullException(nameof(delta)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseAudioTranscriptDelta() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs new file mode 100644 index 0000000000..0589dbe812 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDeltaType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.audio_transcript.delta`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseAudioTranscriptDeltaType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.audio_transcript.delta")] + ResponseAudioTranscriptDelta, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseAudioTranscriptDeltaTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseAudioTranscriptDeltaType value) + { + return value switch + { + RealtimeServerEventResponseAudioTranscriptDeltaType.ResponseAudioTranscriptDelta => "response.audio_transcript.delta", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseAudioTranscriptDeltaType? 
ToEnum(string value) + { + return value switch + { + "response.audio_transcript.delta" => RealtimeServerEventResponseAudioTranscriptDeltaType.ResponseAudioTranscriptDelta, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.Json.g.verified.cs new file mode 100644 index 0000000000..4f53a5a5f5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseAudioTranscriptDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseAudioTranscriptDone? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.g.verified.cs new file mode 100644 index 0000000000..46745a9ef4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.g.verified.cs @@ -0,0 +1,111 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated transcription of audio output is done
+ /// streaming. Also emitted when a Response is interrupted, incomplete, or
+ /// cancelled. + /// </summary>
+ public sealed partial class RealtimeServerEventResponseAudioTranscriptDone + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.audio_transcript.done`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseAudioTranscriptDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The index of the content part in the item's content array. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// The final transcript of the audio. + /// + [global::Newtonsoft.Json.JsonProperty("transcript", Required = global::Newtonsoft.Json.Required.Always)] + public string Transcript { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.audio_transcript.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The final transcript of the audio. + /// + public RealtimeServerEventResponseAudioTranscriptDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string transcript, + global::G.RealtimeServerEventResponseAudioTranscriptDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Transcript = transcript ?? throw new global::System.ArgumentNullException(nameof(transcript)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseAudioTranscriptDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs new file mode 100644 index 0000000000..23cdc4fd52 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.audio_transcript.done`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseAudioTranscriptDoneType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.audio_transcript.done")] + ResponseAudioTranscriptDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseAudioTranscriptDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseAudioTranscriptDoneType value) + { + return value switch + { + RealtimeServerEventResponseAudioTranscriptDoneType.ResponseAudioTranscriptDone => "response.audio_transcript.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseAudioTranscriptDoneType? ToEnum(string value) + { + return value switch + { + "response.audio_transcript.done" => RealtimeServerEventResponseAudioTranscriptDoneType.ResponseAudioTranscriptDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.Json.g.verified.cs new file mode 100644 index 0000000000..38f039458f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAdded.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseContentPartAdded + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseContentPartAdded? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.g.verified.cs new file mode 100644 index 0000000000..48b74a71f3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.g.verified.cs @@ -0,0 +1,110 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAdded.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a new content part is added to an assistant message item during
+ /// response generation. + /// </summary>
+ public sealed partial class RealtimeServerEventResponseContentPartAdded + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.content_part.added`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseContentPartAddedType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the item to which the content part was added. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The index of the content part in the item's content array. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// The content part that was added. + /// + [global::Newtonsoft.Json.JsonProperty("part", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeServerEventResponseContentPartAddedPart Part { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.content_part.added`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item to which the content part was added. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The content part that was added. + /// + public RealtimeServerEventResponseContentPartAdded( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + global::G.RealtimeServerEventResponseContentPartAddedPart part, + global::G.RealtimeServerEventResponseContentPartAddedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Part = part ?? throw new global::System.ArgumentNullException(nameof(part)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseContentPartAdded() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.Json.g.verified.cs new file mode 100644 index 0000000000..e84ffa7047 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAddedPart.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseContentPartAddedPart + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseContentPartAddedPart? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.g.verified.cs new file mode 100644 index 0000000000..c0f12823df --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAddedPart.g.cs + +#nullable enable + +namespace G +{ + /// + /// The content part that was added. + /// + public sealed partial class RealtimeServerEventResponseContentPartAddedPart + { + /// + /// The content type ("text", "audio"). + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseContentPartAddedPartType? Type { get; set; } + + /// + /// The text content (if type is "text"). + /// + [global::Newtonsoft.Json.JsonProperty("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data (if type is "audio"). + /// + [global::Newtonsoft.Json.JsonProperty("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio (if type is "audio"). + /// + [global::Newtonsoft.Json.JsonProperty("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The content type ("text", "audio"). + /// + /// + /// The text content (if type is "text"). + /// + /// + /// Base64-encoded audio data (if type is "audio"). + /// + /// + /// The transcript of the audio (if type is "audio"). + /// + public RealtimeServerEventResponseContentPartAddedPart( + global::G.RealtimeServerEventResponseContentPartAddedPartType? type, + string? text, + string? audio, + string? transcript) + { + this.Type = type; + this.Text = text; + this.Audio = audio; + this.Transcript = transcript; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseContentPartAddedPart() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs new file mode 100644 index 0000000000..c92c35fa56 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAddedPartType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The content type ("text", "audio"). 
+ /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseContentPartAddedPartType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="audio")] + Audio, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseContentPartAddedPartTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseContentPartAddedPartType value) + { + return value switch + { + RealtimeServerEventResponseContentPartAddedPartType.Audio => "audio", + RealtimeServerEventResponseContentPartAddedPartType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseContentPartAddedPartType? ToEnum(string value) + { + return value switch + { + "audio" => RealtimeServerEventResponseContentPartAddedPartType.Audio, + "text" => RealtimeServerEventResponseContentPartAddedPartType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedType.g.verified.cs new file mode 100644 index 0000000000..45a6d4d4e1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartAddedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAddedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.content_part.added`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseContentPartAddedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.content_part.added")] + ResponseContentPartAdded, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseContentPartAddedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseContentPartAddedType value) + { + return value switch + { + RealtimeServerEventResponseContentPartAddedType.ResponseContentPartAdded => "response.content_part.added", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseContentPartAddedType? 
ToEnum(string value) + { + return value switch + { + "response.content_part.added" => RealtimeServerEventResponseContentPartAddedType.ResponseContentPartAdded, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDone.Json.g.verified.cs new file mode 100644 index 0000000000..b608f47816 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDone.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseContentPartDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseContentPartDone? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDone.g.verified.cs new file mode 100644 index 0000000000..852407f29a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDone.g.verified.cs @@ -0,0 +1,110 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a content part is done streaming in an assistant message item.
+ /// Also emitted when a Response is interrupted, incomplete, or cancelled. + /// </summary>
+ public sealed partial class RealtimeServerEventResponseContentPartDone + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.content_part.done`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseContentPartDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The index of the content part in the item's content array. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// The content part that is done. + /// + [global::Newtonsoft.Json.JsonProperty("part", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeServerEventResponseContentPartDonePart Part { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.content_part.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The content part that is done. + /// + public RealtimeServerEventResponseContentPartDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + global::G.RealtimeServerEventResponseContentPartDonePart part, + global::G.RealtimeServerEventResponseContentPartDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Part = part ?? throw new global::System.ArgumentNullException(nameof(part)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseContentPartDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.Json.g.verified.cs new file mode 100644 index 0000000000..c62ab63420 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDonePart.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseContentPartDonePart + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseContentPartDonePart? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.g.verified.cs new file mode 100644 index 0000000000..37ba049f7d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDonePart.g.cs + +#nullable enable + +namespace G +{ + /// + /// The content part that is done. + /// + public sealed partial class RealtimeServerEventResponseContentPartDonePart + { + /// + /// The content type ("text", "audio"). + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public string? Type { get; set; } + + /// + /// The text content (if type is "text"). + /// + [global::Newtonsoft.Json.JsonProperty("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data (if type is "audio"). + /// + [global::Newtonsoft.Json.JsonProperty("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio (if type is "audio"). + /// + [global::Newtonsoft.Json.JsonProperty("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The content type ("text", "audio"). + /// + /// + /// The text content (if type is "text"). + /// + /// + /// Base64-encoded audio data (if type is "audio"). + /// + /// + /// The transcript of the audio (if type is "audio"). + /// + public RealtimeServerEventResponseContentPartDonePart( + string? type, + string? text, + string? audio, + string? transcript) + { + this.Type = type; + this.Text = text; + this.Audio = audio; + this.Transcript = transcript; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseContentPartDonePart() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDoneType.g.verified.cs new file mode 100644 index 0000000000..5d40367b0e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseContentPartDoneType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.content_part.done`. 
+ /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseContentPartDoneType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.content_part.done")] + ResponseContentPartDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseContentPartDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseContentPartDoneType value) + { + return value switch + { + RealtimeServerEventResponseContentPartDoneType.ResponseContentPartDone => "response.content_part.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseContentPartDoneType? ToEnum(string value) + { + return value switch + { + "response.content_part.done" => RealtimeServerEventResponseContentPartDoneType.ResponseContentPartDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreated.Json.g.verified.cs new file mode 100644 index 0000000000..ea1ba1c95f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreated.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseCreated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseCreated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseCreated? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreated.g.verified.cs new file mode 100644 index 0000000000..5301069a71 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreated.g.verified.cs @@ -0,0 +1,66 @@ +//HintName: G.Models.RealtimeServerEventResponseCreated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a new Response is created. The first event of response creation,
+    /// where the response is in an initial state of `in_progress`.
+    /// </summary>
+ public sealed partial class RealtimeServerEventResponseCreated + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.created`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseCreatedType Type { get; set; } + + /// + /// The response resource. + /// + [global::Newtonsoft.Json.JsonProperty("response", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeResponse Response { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.created`. + /// + /// + /// The response resource. + /// + public RealtimeServerEventResponseCreated( + string eventId, + global::G.RealtimeResponse response, + global::G.RealtimeServerEventResponseCreatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Response = response ?? throw new global::System.ArgumentNullException(nameof(response)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseCreated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreatedType.g.verified.cs new file mode 100644 index 0000000000..d55561e91c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseCreatedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseCreatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.created`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseCreatedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.created")] + ResponseCreated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseCreatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseCreatedType value) + { + return value switch + { + RealtimeServerEventResponseCreatedType.ResponseCreated => "response.created", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseCreatedType? 
ToEnum(string value) + { + return value switch + { + "response.created" => RealtimeServerEventResponseCreatedType.ResponseCreated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDone.Json.g.verified.cs new file mode 100644 index 0000000000..5e11ece576 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDone.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseDone? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDone.g.verified.cs new file mode 100644 index 0000000000..16db2b5159 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDone.g.verified.cs @@ -0,0 +1,67 @@ +//HintName: G.Models.RealtimeServerEventResponseDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a Response is done streaming. Always emitted, no matter the
+    /// final state. The Response object included in the `response.done` event will
+    /// include all output Items in the Response but will omit the raw audio data.
+    /// </summary>
+ public sealed partial class RealtimeServerEventResponseDone + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.done`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseDoneType Type { get; set; } + + /// + /// The response resource. + /// + [global::Newtonsoft.Json.JsonProperty("response", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeResponse Response { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.done`. + /// + /// + /// The response resource. + /// + public RealtimeServerEventResponseDone( + string eventId, + global::G.RealtimeResponse response, + global::G.RealtimeServerEventResponseDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Response = response ?? throw new global::System.ArgumentNullException(nameof(response)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDoneType.g.verified.cs new file mode 100644 index 0000000000..4e2f85a0eb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseDoneType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.done`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseDoneType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.done")] + ResponseDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseDoneType value) + { + return value switch + { + RealtimeServerEventResponseDoneType.ResponseDone => "response.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseDoneType? 
ToEnum(string value) + { + return value switch + { + "response.done" => RealtimeServerEventResponseDoneType.ResponseDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.Json.g.verified.cs new file mode 100644 index 0000000000..6caba0af5a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDelta + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseFunctionCallArgumentsDelta? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.verified.cs new file mode 100644 index 0000000000..31ebb06d3a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.verified.cs @@ -0,0 +1,109 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated function call arguments are updated. + /// + public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDelta + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.function_call_arguments.delta`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the function call item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The ID of the function call. + /// + [global::Newtonsoft.Json.JsonProperty("call_id", Required = global::Newtonsoft.Json.Required.Always)] + public string CallId { get; set; } = default!; + + /// + /// The arguments delta as a JSON string. + /// + [global::Newtonsoft.Json.JsonProperty("delta", Required = global::Newtonsoft.Json.Required.Always)] + public string Delta { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.function_call_arguments.delta`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the function call item. 
+ /// + /// + /// The index of the output item in the response. + /// + /// + /// The ID of the function call. + /// + /// + /// The arguments delta as a JSON string. + /// + public RealtimeServerEventResponseFunctionCallArgumentsDelta( + string eventId, + string responseId, + string itemId, + int outputIndex, + string callId, + string delta, + global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.CallId = callId ?? throw new global::System.ArgumentNullException(nameof(callId)); + this.Delta = delta ?? throw new global::System.ArgumentNullException(nameof(delta)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseFunctionCallArgumentsDelta() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs new file mode 100644 index 0000000000..65e51a136f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.function_call_arguments.delta`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseFunctionCallArgumentsDeltaType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.function_call_arguments.delta")] + ResponseFunctionCallArgumentsDelta, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseFunctionCallArgumentsDeltaType value) + { + return value switch + { + RealtimeServerEventResponseFunctionCallArgumentsDeltaType.ResponseFunctionCallArgumentsDelta => "response.function_call_arguments.delta", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseFunctionCallArgumentsDeltaType? 
ToEnum(string value) + { + return value switch + { + "response.function_call_arguments.delta" => RealtimeServerEventResponseFunctionCallArgumentsDeltaType.ResponseFunctionCallArgumentsDelta, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.Json.g.verified.cs new file mode 100644 index 0000000000..ea36099708 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseFunctionCallArgumentsDone? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.verified.cs new file mode 100644 index 0000000000..35648b29e4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.verified.cs @@ -0,0 +1,110 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated function call arguments are done streaming.
+    /// Also emitted when a Response is interrupted, incomplete, or cancelled.
+    /// </summary>
+ public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDone + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.function_call_arguments.done`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the function call item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The ID of the function call. + /// + [global::Newtonsoft.Json.JsonProperty("call_id", Required = global::Newtonsoft.Json.Required.Always)] + public string CallId { get; set; } = default!; + + /// + /// The final arguments as a JSON string. + /// + [global::Newtonsoft.Json.JsonProperty("arguments", Required = global::Newtonsoft.Json.Required.Always)] + public string Arguments { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.function_call_arguments.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the function call item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The ID of the function call. + /// + /// + /// The final arguments as a JSON string. + /// + public RealtimeServerEventResponseFunctionCallArgumentsDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + string callId, + string arguments, + global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.CallId = callId ?? throw new global::System.ArgumentNullException(nameof(callId)); + this.Arguments = arguments ?? throw new global::System.ArgumentNullException(nameof(arguments)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseFunctionCallArgumentsDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs new file mode 100644 index 0000000000..1bf96fb210 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.function_call_arguments.done`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseFunctionCallArgumentsDoneType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.function_call_arguments.done")] + ResponseFunctionCallArgumentsDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseFunctionCallArgumentsDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseFunctionCallArgumentsDoneType value) + { + return value switch + { + RealtimeServerEventResponseFunctionCallArgumentsDoneType.ResponseFunctionCallArgumentsDone => "response.function_call_arguments.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseFunctionCallArgumentsDoneType? ToEnum(string value) + { + return value switch + { + "response.function_call_arguments.done" => RealtimeServerEventResponseFunctionCallArgumentsDoneType.ResponseFunctionCallArgumentsDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.Json.g.verified.cs new file mode 100644 index 0000000000..6436cdb177 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemAdded.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseOutputItemAdded + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseOutputItemAdded? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.g.verified.cs new file mode 100644 index 0000000000..e23f0acb11 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.g.verified.cs @@ -0,0 +1,87 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemAdded.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a new Item is created during Response generation. + /// + public sealed partial class RealtimeServerEventResponseOutputItemAdded + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.output_item.added`. 
+ /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseOutputItemAddedType Type { get; set; } + + /// + /// The ID of the Response to which the item belongs. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The index of the output item in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The item to add to the conversation. + /// + [global::Newtonsoft.Json.JsonProperty("item", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeConversationItem Item { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.output_item.added`. + /// + /// + /// The ID of the Response to which the item belongs. + /// + /// + /// The index of the output item in the Response. + /// + /// + /// The item to add to the conversation. + /// + public RealtimeServerEventResponseOutputItemAdded( + string eventId, + string responseId, + int outputIndex, + global::G.RealtimeConversationItem item, + global::G.RealtimeServerEventResponseOutputItemAddedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.OutputIndex = outputIndex; + this.Item = item ?? throw new global::System.ArgumentNullException(nameof(item)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseOutputItemAdded() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs new file mode 100644 index 0000000000..3a7fed116f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemAddedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.output_item.added`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseOutputItemAddedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.output_item.added")] + ResponseOutputItemAdded, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseOutputItemAddedTypeExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this RealtimeServerEventResponseOutputItemAddedType value) + { + return value switch + { + RealtimeServerEventResponseOutputItemAddedType.ResponseOutputItemAdded => "response.output_item.added", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseOutputItemAddedType? ToEnum(string value) + { + return value switch + { + "response.output_item.added" => RealtimeServerEventResponseOutputItemAddedType.ResponseOutputItemAdded, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.Json.g.verified.cs new file mode 100644 index 0000000000..e5d99ee928 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseOutputItemDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseOutputItemDone? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.g.verified.cs new file mode 100644 index 0000000000..57539171d2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.g.verified.cs @@ -0,0 +1,88 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an Item is done streaming. Also emitted when a Response is
+ /// interrupted, incomplete, or cancelled. + ///
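
Usage sketch (illustrative only; not part of the generated snapshots): the generated *TypeExtensions helpers map between the dotted wire values and the C# enums without reflection, e.g. for the `response.output_item.added` type shown above. Values here are arbitrary examples.

    using System;
    using G;

    // Enum -> wire value via the generated extension method.
    string wire = RealtimeServerEventResponseOutputItemAddedType.ResponseOutputItemAdded.ToValueString();
    Console.WriteLine(wire); // response.output_item.added

    // Wire value -> enum via the generated static helper; unknown values come back as null.
    RealtimeServerEventResponseOutputItemAddedType? parsed =
        RealtimeServerEventResponseOutputItemAddedTypeExtensions.ToEnum("response.output_item.added");
    Console.WriteLine(parsed); // ResponseOutputItemAdded
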
+ public sealed partial class RealtimeServerEventResponseOutputItemDone + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.output_item.done`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseOutputItemDoneType Type { get; set; } + + /// + /// The ID of the Response to which the item belongs. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The index of the output item in the Response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The item to add to the conversation. + /// + [global::Newtonsoft.Json.JsonProperty("item", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeConversationItem Item { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.output_item.done`. + /// + /// + /// The ID of the Response to which the item belongs. + /// + /// + /// The index of the output item in the Response. + /// + /// + /// The item to add to the conversation. + /// + public RealtimeServerEventResponseOutputItemDone( + string eventId, + string responseId, + int outputIndex, + global::G.RealtimeConversationItem item, + global::G.RealtimeServerEventResponseOutputItemDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.OutputIndex = outputIndex; + this.Item = item ?? throw new global::System.ArgumentNullException(nameof(item)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseOutputItemDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs new file mode 100644 index 0000000000..98059a8d25 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.output_item.done`. 
+ /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseOutputItemDoneType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.output_item.done")] + ResponseOutputItemDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseOutputItemDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseOutputItemDoneType value) + { + return value switch + { + RealtimeServerEventResponseOutputItemDoneType.ResponseOutputItemDone => "response.output_item.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseOutputItemDoneType? ToEnum(string value) + { + return value switch + { + "response.output_item.done" => RealtimeServerEventResponseOutputItemDoneType.ResponseOutputItemDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDelta.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDelta.Json.g.verified.cs new file mode 100644 index 0000000000..47427e341b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDelta.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDelta.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseTextDelta + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseTextDelta? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDelta.g.verified.cs new file mode 100644 index 0000000000..91c871d9ab --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDelta.g.verified.cs @@ -0,0 +1,109 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDelta.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the text value of a "text" content part is updated. + /// + public sealed partial class RealtimeServerEventResponseTextDelta + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.text.delta`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseTextDeltaType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The index of the content part in the item's content array. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// The text delta. 
+ /// + [global::Newtonsoft.Json.JsonProperty("delta", Required = global::Newtonsoft.Json.Required.Always)] + public string Delta { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.text.delta`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The text delta. + /// + public RealtimeServerEventResponseTextDelta( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string delta, + global::G.RealtimeServerEventResponseTextDeltaType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Delta = delta ?? throw new global::System.ArgumentNullException(nameof(delta)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseTextDelta() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDeltaType.g.verified.cs new file mode 100644 index 0000000000..8f3432cd80 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDeltaType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDeltaType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.text.delta`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseTextDeltaType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.text.delta")] + ResponseTextDelta, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseTextDeltaTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseTextDeltaType value) + { + return value switch + { + RealtimeServerEventResponseTextDeltaType.ResponseTextDelta => "response.text.delta", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseTextDeltaType? 
ToEnum(string value) + { + return value switch + { + "response.text.delta" => RealtimeServerEventResponseTextDeltaType.ResponseTextDelta, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDone.Json.g.verified.cs new file mode 100644 index 0000000000..263d4f5dc3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDone.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseTextDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseTextDone? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDone.g.verified.cs new file mode 100644 index 0000000000..868aaacc49 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDone.g.verified.cs @@ -0,0 +1,110 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the text value of a "text" content part is done streaming. Also
+ /// emitted when a Response is interrupted, incomplete, or cancelled. + ///
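
Usage sketch (illustrative only; not part of the generated snapshots): a minimal sketch of how the `response.text.delta` events above and the `response.text.done` event defined below are typically consumed together. Event IDs and text values are hypothetical; constructor argument order follows the generated signatures.

    using System;
    using System.Text;
    using G;

    // Two hypothetical deltas followed by the matching done event.
    var deltas = new[]
    {
        new RealtimeServerEventResponseTextDelta("event_1", "resp_1", "item_1", 0, 0, "Hel", RealtimeServerEventResponseTextDeltaType.ResponseTextDelta),
        new RealtimeServerEventResponseTextDelta("event_2", "resp_1", "item_1", 0, 0, "lo!", RealtimeServerEventResponseTextDeltaType.ResponseTextDelta),
    };
    var done = new RealtimeServerEventResponseTextDone("event_3", "resp_1", "item_1", 0, 0, "Hello!", RealtimeServerEventResponseTextDoneType.ResponseTextDone);

    var buffer = new StringBuilder();
    foreach (var delta in deltas)
    {
        buffer.Append(delta.Delta);
    }

    // For a completed response the concatenated deltas equal the final Text;
    // an interrupted or cancelled response may stop short of it.
    Console.WriteLine(buffer.ToString() == done.Text); // True
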
+ public sealed partial class RealtimeServerEventResponseTextDone + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `response.text.done`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventResponseTextDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::Newtonsoft.Json.JsonProperty("response_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ResponseId { get; set; } = default!; + + /// + /// The ID of the item. + /// + [global::Newtonsoft.Json.JsonProperty("item_id", Required = global::Newtonsoft.Json.Required.Always)] + public string ItemId { get; set; } = default!; + + /// + /// The index of the output item in the response. + /// + [global::Newtonsoft.Json.JsonProperty("output_index", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputIndex { get; set; } = default!; + + /// + /// The index of the content part in the item's content array. + /// + [global::Newtonsoft.Json.JsonProperty("content_index", Required = global::Newtonsoft.Json.Required.Always)] + public int ContentIndex { get; set; } = default!; + + /// + /// The final text content. + /// + [global::Newtonsoft.Json.JsonProperty("text", Required = global::Newtonsoft.Json.Required.Always)] + public string Text { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.text.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The final text content. + /// + public RealtimeServerEventResponseTextDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string text, + global::G.RealtimeServerEventResponseTextDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Text = text ?? throw new global::System.ArgumentNullException(nameof(text)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseTextDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDoneType.g.verified.cs new file mode 100644 index 0000000000..ef3e170c80 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventResponseTextDoneType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.text.done`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventResponseTextDoneType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="response.text.done")] + ResponseTextDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseTextDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseTextDoneType value) + { + return value switch + { + RealtimeServerEventResponseTextDoneType.ResponseTextDone => "response.text.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseTextDoneType? ToEnum(string value) + { + return value switch + { + "response.text.done" => RealtimeServerEventResponseTextDoneType.ResponseTextDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreated.Json.g.verified.cs new file mode 100644 index 0000000000..fc8425d003 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreated.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventSessionCreated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventSessionCreated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventSessionCreated? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreated.g.verified.cs new file mode 100644 index 0000000000..9e8de8d2cb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreated.g.verified.cs @@ -0,0 +1,67 @@ +//HintName: G.Models.RealtimeServerEventSessionCreated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a Session is created. Emitted automatically when a new
+ /// connection is established as the first server event. This event will contain
+ /// the default Session configuration. + ///
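
Usage sketch (illustrative only; not part of the generated snapshots): parsing a `session.created` payload with the generated FromJson helper shown above. A StringEnumConverter is assumed here so the dotted `type` value maps onto the generated enum via its EnumMember value; the real SDK may wire converters up differently, and the JSON is a trimmed example.

    using System;
    using G;
    using Newtonsoft.Json;
    using Newtonsoft.Json.Converters;

    var settings = new JsonSerializerSettings { Converters = { new StringEnumConverter() } };

    // Trimmed example payload; a real session.created event carries the full default configuration.
    const string payload = @"{""event_id"":""event_1234"",""type"":""session.created"",""session"":{""temperature"":0.8}}";

    var created = RealtimeServerEventSessionCreated.FromJson(payload, settings);
    Console.WriteLine(created?.Session?.Temperature); // 0.8
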
+ public sealed partial class RealtimeServerEventSessionCreated + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `session.created`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventSessionCreatedType Type { get; set; } + + /// + /// Realtime session object configuration. + /// + [global::Newtonsoft.Json.JsonProperty("session", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeSession Session { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `session.created`. + /// + /// + /// Realtime session object configuration. + /// + public RealtimeServerEventSessionCreated( + string eventId, + global::G.RealtimeSession session, + global::G.RealtimeServerEventSessionCreatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Session = session ?? throw new global::System.ArgumentNullException(nameof(session)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventSessionCreated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreatedType.g.verified.cs new file mode 100644 index 0000000000..bb958f0ab1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionCreatedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventSessionCreatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `session.created`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventSessionCreatedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="session.created")] + SessionCreated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventSessionCreatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventSessionCreatedType value) + { + return value switch + { + RealtimeServerEventSessionCreatedType.SessionCreated => "session.created", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventSessionCreatedType? 
ToEnum(string value) + { + return value switch + { + "session.created" => RealtimeServerEventSessionCreatedType.SessionCreated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdated.Json.g.verified.cs new file mode 100644 index 0000000000..7683b4f10c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdated.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeServerEventSessionUpdated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventSessionUpdated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventSessionUpdated? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdated.g.verified.cs new file mode 100644 index 0000000000..90714e285d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdated.g.verified.cs @@ -0,0 +1,66 @@ +//HintName: G.Models.RealtimeServerEventSessionUpdated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a session is updated with a `session.update` event, unless
+ /// there is an error. + ///
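
Usage sketch (illustrative only; not part of the generated snapshots): the `session.updated` confirmation carries the effective configuration, so a client would typically replace its cached session settings with the Session carried by this event. The handler and values below are hypothetical; RealtimeSession is the model defined further down in this diff.

    using System;
    using G;

    // Hypothetical handler: keep whatever configuration the server confirms.
    RealtimeSession ApplySessionUpdated(RealtimeServerEventSessionUpdated e) => e.Session;

    var updated = new RealtimeServerEventSessionUpdated(
        "event_5678",
        new RealtimeSession { Temperature = 0.7 },
        RealtimeServerEventSessionUpdatedType.SessionUpdated);

    RealtimeSession current = ApplySessionUpdated(updated);
    Console.WriteLine(current.Temperature); // 0.7
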
+ public sealed partial class RealtimeServerEventSessionUpdated + { + /// + /// The unique ID of the server event. + /// + [global::Newtonsoft.Json.JsonProperty("event_id", Required = global::Newtonsoft.Json.Required.Always)] + public string EventId { get; set; } = default!; + + /// + /// The event type, must be `session.updated`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeServerEventSessionUpdatedType Type { get; set; } + + /// + /// Realtime session object configuration. + /// + [global::Newtonsoft.Json.JsonProperty("session", Required = global::Newtonsoft.Json.Required.Always)] + public global::G.RealtimeSession Session { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `session.updated`. + /// + /// + /// Realtime session object configuration. + /// + public RealtimeServerEventSessionUpdated( + string eventId, + global::G.RealtimeSession session, + global::G.RealtimeServerEventSessionUpdatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Session = session ?? throw new global::System.ArgumentNullException(nameof(session)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventSessionUpdated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdatedType.g.verified.cs new file mode 100644 index 0000000000..cb5c5b495e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeServerEventSessionUpdatedType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeServerEventSessionUpdatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `session.updated`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeServerEventSessionUpdatedType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="session.updated")] + SessionUpdated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventSessionUpdatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventSessionUpdatedType value) + { + return value switch + { + RealtimeServerEventSessionUpdatedType.SessionUpdated => "session.updated", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventSessionUpdatedType? 
ToEnum(string value) + { + return value switch + { + "session.updated" => RealtimeServerEventSessionUpdatedType.SessionUpdated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSession.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSession.Json.g.verified.cs new file mode 100644 index 0000000000..da2e696b83 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSession.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeSession.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSession + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSession? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSession.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSession.g.verified.cs new file mode 100644 index 0000000000..aaa07c0762 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSession.g.verified.cs @@ -0,0 +1,207 @@ +//HintName: G.Models.RealtimeSession.g.cs + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace G +{ + /// + /// Realtime session object configuration. + /// + public sealed partial class RealtimeSession + { + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + ///
+ [global::Newtonsoft.Json.JsonProperty("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + ///
+ [global::Newtonsoft.Json.JsonProperty("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond. Current voice options are `ash`,
+ /// `ballad`, `coral`, `sage`, and `verse`.
+ /// Also supported but not recommended are `alloy`, `echo`, and `shimmer`.
+ /// These older voices are less expressive.
+ /// Voice cannot be changed during the session once the model has
+ /// responded with audio at least once. + ///
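
Usage sketch (illustrative only; not part of the generated snapshots): building a session configuration with the generated model's object initializer (properties declared above and just below) and serializing it with the ToJson helper from the companion .Json partial earlier in this diff. Property values are arbitrary examples.

    using System;
    using G;

    var session = new RealtimeSession
    {
        Instructions = "You are a concise, friendly assistant.",
        InputAudioFormat = "pcm16",
        OutputAudioFormat = "pcm16",
        Temperature = 0.8,
    };

    // With default Newtonsoft settings, enum-typed members such as Voice would additionally
    // need a string enum converter to match the dotted/lowercase wire values.
    Console.WriteLine(session.ToJson());
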
+ [global::Newtonsoft.Json.JsonProperty("voice")] + public global::G.RealtimeSessionVoice? Voice { get; set; } + + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::Newtonsoft.Json.JsonProperty("input_audio_format")] + public string? InputAudioFormat { get; set; } + + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::Newtonsoft.Json.JsonProperty("output_audio_format")] + public string? OutputAudioFormat { get; set; } + + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + ///
+ [global::Newtonsoft.Json.JsonProperty("input_audio_transcription")] + public global::G.RealtimeSessionInputAudioTranscription? InputAudioTranscription { get; set; } + + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + ///
+ [global::Newtonsoft.Json.JsonProperty("turn_detection")] + public global::G.RealtimeSessionTurnDetection? TurnDetection { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::Newtonsoft.Json.JsonProperty("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + ///
+ [global::Newtonsoft.Json.JsonProperty("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + [global::Newtonsoft.Json.JsonProperty("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + ///
+ [global::Newtonsoft.Json.JsonProperty("max_response_output_tokens")] + public global::G.OneOf? MaxResponseOutputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + /// + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format,
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + /// + /// + /// The voice the model uses to respond. Current voice options are `ash`,
+ /// `ballad`, `coral`, `sage`, and `verse`.
+ /// Also supported but not recommended are `alloy`, `echo`, and `shimmer`.
+ /// These older voices are less expressive.
+ /// Voice cannot be changed during the session once the model has
+ /// responded with audio at least once. + /// + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + /// + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + /// + /// + /// Tools (functions) available to the model. + /// + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + /// + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + /// + public RealtimeSession( + global::System.Collections.Generic.IList? modalities, + string? instructions, + global::G.RealtimeSessionVoice? voice, + string? inputAudioFormat, + string? outputAudioFormat, + global::G.RealtimeSessionInputAudioTranscription? inputAudioTranscription, + global::G.RealtimeSessionTurnDetection? turnDetection, + global::System.Collections.Generic.IList? tools, + string? toolChoice, + double? temperature, + global::G.OneOf? maxResponseOutputTokens) + { + this.Modalities = modalities; + this.Instructions = instructions; + this.Voice = voice; + this.InputAudioFormat = inputAudioFormat; + this.OutputAudioFormat = outputAudioFormat; + this.InputAudioTranscription = inputAudioTranscription; + this.TurnDetection = turnDetection; + this.Tools = tools; + this.ToolChoice = toolChoice; + this.Temperature = temperature; + this.MaxResponseOutputTokens = maxResponseOutputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSession() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionInputAudioTranscription.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionInputAudioTranscription.Json.g.verified.cs new file mode 100644 index 0000000000..68015b88f4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionInputAudioTranscription.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeSessionInputAudioTranscription.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionInputAudioTranscription + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionInputAudioTranscription? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionInputAudioTranscription.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionInputAudioTranscription.g.verified.cs new file mode 100644 index 0000000000..9d682d35eb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionInputAudioTranscription.g.verified.cs @@ -0,0 +1,49 @@ +//HintName: G.Models.RealtimeSessionInputAudioTranscription.g.cs + +#nullable enable + +namespace G +{ + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn off once on. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + ///
+ public sealed partial class RealtimeSessionInputAudioTranscription + { + /// + /// The model to use for transcription, `whisper-1` is the only currently
+ /// supported model. + ///
+ [global::Newtonsoft.Json.JsonProperty("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The model to use for transcription, `whisper-1` is the only currently
+ /// supported model. + /// + public RealtimeSessionInputAudioTranscription( + string? model) + { + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionInputAudioTranscription() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionMaxResponseOutputTokens.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionMaxResponseOutputTokens.g.verified.cs new file mode 100644 index 0000000000..76e010a3c1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionMaxResponseOutputTokens.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeSessionMaxResponseOutputTokens.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeSessionMaxResponseOutputTokens + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="inf")] + Inf, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionMaxResponseOutputTokensExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionMaxResponseOutputTokens value) + { + return value switch + { + RealtimeSessionMaxResponseOutputTokens.Inf => "inf", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionMaxResponseOutputTokens? ToEnum(string value) + { + return value switch + { + "inf" => RealtimeSessionMaxResponseOutputTokens.Inf, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalitie.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalitie.g.verified.cs new file mode 100644 index 0000000000..dc92200383 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalitie.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.RealtimeSessionModalitie.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeSessionModalitie + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="audio")] + Audio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionModalitieExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionModalitie value) + { + return value switch + { + RealtimeSessionModalitie.Text => "text", + RealtimeSessionModalitie.Audio => "audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionModalitie? 
ToEnum(string value) + { + return value switch + { + "text" => RealtimeSessionModalitie.Text, + "audio" => RealtimeSessionModalitie.Audio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalities.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalities.Json.g.verified.cs new file mode 100644 index 0000000000..c60f24d551 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalities.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeSessionModalities.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionModalities + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionModalities? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalities.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalities.g.verified.cs new file mode 100644 index 0000000000..67d1d5a0dd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionModalities.g.verified.cs @@ -0,0 +1,28 @@ +//HintName: G.Models.RealtimeSessionModalities.g.cs + +#nullable enable + +namespace G +{ + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + ///
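Editor's note: the `ToJson`/`FromJson`/`FromJsonStreamAsync` members in the Json partial above are thin wrappers over Newtonsoft.Json, so the generated models round-trip in one call each way. A minimal sketch, assuming only the generated `G` namespace and Newtonsoft.Json are referenced; the `note` entry is arbitrary and only shows where unknown JSON members land:

using System;
using G;

// Round-trip a generated model through the Newtonsoft-based helpers.
var modalities = new RealtimeSessionModalities();
modalities.AdditionalProperties["note"] = "unknown JSON members are captured here";

string json = modalities.ToJson();                        // JsonConvert.SerializeObject under the hood
RealtimeSessionModalities? parsed = RealtimeSessionModalities.FromJson(json);

Console.WriteLine(parsed?.AdditionalProperties.Count);    // 1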
+ public sealed partial class RealtimeSessionModalities + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionModalities( + ) + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTool.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTool.Json.g.verified.cs new file mode 100644 index 0000000000..62e342b28f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTool.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeSessionTool.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionTool + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionTool? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTool.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTool.g.verified.cs new file mode 100644 index 0000000000..be6975a26e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTool.g.verified.cs @@ -0,0 +1,80 @@ +//HintName: G.Models.RealtimeSessionTool.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class RealtimeSessionTool + { + /// + /// The type of the tool, i.e. `function`. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RealtimeSessionToolType? Type { get; set; } + + /// + /// The name of the function. + /// + [global::Newtonsoft.Json.JsonProperty("name")] + public string? Name { get; set; } + + /// + /// The description of the function, including guidance on when and how
+ /// to call it, and guidance about what to tell the user when calling
+ /// (if anything). + ///
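Editor's note: because the spec models a realtime function tool as just type/name/description/parameters, wiring one up with the class below takes only a few lines. A sketch under the assumption that the companion `RealtimeSessionToolType` enum and Json partial (both added later in this diff) are in scope; the weather function and its JSON Schema are invented for illustration:

using G;

// Illustrative only: the function name and schema are hypothetical.
var weatherTool = new RealtimeSessionTool(
    type: RealtimeSessionToolType.Function,
    name: "get_weather",
    description: "Looks up current weather; tell the user the lookup is in progress.",
    parameters: new
    {
        type = "object",
        properties = new { location = new { type = "string" } },
        required = new[] { "location" }
    });

string payload = weatherTool.ToJson();   // Json partial generated further down in this diff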
+ [global::Newtonsoft.Json.JsonProperty("description")] + public string? Description { get; set; } + + /// + /// Parameters of the function in JSON Schema. + /// + [global::Newtonsoft.Json.JsonProperty("parameters")] + public object? Parameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the tool, i.e. `function`. + /// + /// + /// The name of the function. + /// + /// + /// The description of the function, including guidance on when and how
+ /// to call it, and guidance about what to tell the user when calling
+ /// (if anything). + /// + /// + /// Parameters of the function in JSON Schema. + /// + public RealtimeSessionTool( + global::G.RealtimeSessionToolType? type, + string? name, + string? description, + object? parameters) + { + this.Type = type; + this.Name = name; + this.Description = description; + this.Parameters = parameters; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionTool() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolParameters.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolParameters.Json.g.verified.cs new file mode 100644 index 0000000000..c817690e5a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolParameters.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeSessionToolParameters.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionToolParameters + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionToolParameters? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolParameters.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolParameters.g.verified.cs new file mode 100644 index 0000000000..f3f00fac24 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolParameters.g.verified.cs @@ -0,0 +1,27 @@ +//HintName: G.Models.RealtimeSessionToolParameters.g.cs + +#nullable enable + +namespace G +{ + /// + /// Parameters of the function in JSON Schema. + /// + public sealed partial class RealtimeSessionToolParameters + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionToolParameters( + ) + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolType.g.verified.cs new file mode 100644 index 0000000000..3b359be0e2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionToolType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RealtimeSessionToolType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the tool, i.e. `function`. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RealtimeSessionToolType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="function")] + Function, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionToolTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionToolType value) + { + return value switch + { + RealtimeSessionToolType.Function => "function", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionToolType? 
ToEnum(string value) + { + return value switch + { + "function" => RealtimeSessionToolType.Function, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTurnDetection.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTurnDetection.Json.g.verified.cs new file mode 100644 index 0000000000..e58c43a3f3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTurnDetection.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeSessionTurnDetection.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionTurnDetection + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionTurnDetection? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTurnDetection.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTurnDetection.g.verified.cs new file mode 100644 index 0000000000..218ed7ce77 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionTurnDetection.g.verified.cs @@ -0,0 +1,88 @@ +//HintName: G.Models.RealtimeSessionTurnDetection.g.cs + +#nullable enable + +namespace G +{ + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + ///
+ public sealed partial class RealtimeSessionTurnDetection + { + /// + /// Type of turn detection, only `server_vad` is currently supported. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public string? Type { get; set; } + + /// + /// Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A
+ /// higher threshold will require louder audio to activate the model, and
+ /// thus might perform better in noisy environments. + ///
+ [global::Newtonsoft.Json.JsonProperty("threshold")] + public double? Threshold { get; set; } + + /// + /// Amount of audio to include before the VAD detected speech (in
+ /// milliseconds). Defaults to 300ms. + ///
+ [global::Newtonsoft.Json.JsonProperty("prefix_padding_ms")] + public int? PrefixPaddingMs { get; set; } + + /// + /// Duration of silence to detect speech stop (in milliseconds). Defaults
+ /// to 500ms. With shorter values the model will respond more quickly,
+ /// but may jump in on short pauses from the user. + ///
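Editor's note: the four fields above fully describe a server-VAD configuration, so a short sketch of the generated model may help map the prose to the wire format. The values below simply restate the documented defaults; the constructor shown further down takes the same four arguments:

using G;

// Server VAD with the defaults stated in the comments: 0.5 threshold,
// 300 ms of prefix padding, 500 ms of trailing silence before the turn ends.
var turnDetection = new RealtimeSessionTurnDetection(
    type: "server_vad",
    threshold: 0.5,
    prefixPaddingMs: 300,
    silenceDurationMs: 500);

string json = turnDetection.ToJson();
// {"type":"server_vad","threshold":0.5,"prefix_padding_ms":300,"silence_duration_ms":500}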
+ [global::Newtonsoft.Json.JsonProperty("silence_duration_ms")] + public int? SilenceDurationMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Type of turn detection, only `server_vad` is currently supported. + /// + /// + /// Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A
+ /// higher threshold will require louder audio to activate the model, and
+ /// thus might perform better in noisy environments. + /// + /// + /// Amount of audio to include before the VAD detected speech (in
+ /// milliseconds). Defaults to 300ms. + /// + /// + /// Duration of silence to detect speech stop (in milliseconds). Defaults
+ /// to 500ms. With shorter values the model will respond more quickly,
+ /// but may jump in on short pauses from the user. + /// + public RealtimeSessionTurnDetection( + string? type, + double? threshold, + int? prefixPaddingMs, + int? silenceDurationMs) + { + this.Type = type; + this.Threshold = threshold; + this.PrefixPaddingMs = prefixPaddingMs; + this.SilenceDurationMs = silenceDurationMs; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionTurnDetection() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionVoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionVoice.g.verified.cs new file mode 100644 index 0000000000..4f6b0f6631 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RealtimeSessionVoice.g.verified.cs @@ -0,0 +1,102 @@ +//HintName: G.Models.RealtimeSessionVoice.g.cs + +#nullable enable + +namespace G +{ + /// + /// The voice the model uses to respond. Current voice options are `ash`,
+ /// `ballad`, `coral`, `sage`, and `verse`.
+ /// Also supported but not recommended are `alloy`, `echo`, and `shimmer`.
+ /// These older voices are less expressive.
+ /// Voice cannot be changed during the session once the model has
+ /// responded with audio at least once. + ///
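Editor's note: the enum and its `ToValueString`/`ToEnum` extensions below avoid reflection, so converting between the wire strings and the C# values is cheap in both directions. A small usage sketch; `brand_new_voice` is a deliberately unknown value:

using G;

RealtimeSessionVoice voice = RealtimeSessionVoice.Verse;
string wire = voice.ToValueString();                               // "verse"

RealtimeSessionVoice? parsed = RealtimeSessionVoiceExtensions.ToEnum("coral");
RealtimeSessionVoice? unknown = RealtimeSessionVoiceExtensions.ToEnum("brand_new_voice");
// Unknown strings come back as null instead of throwing, so newly added
// server-side voices degrade gracefully.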
+ [global::System.Runtime.Serialization.DataContract] + public enum RealtimeSessionVoice + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="alloy")] + Alloy, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="ash")] + Ash, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="ballad")] + Ballad, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="coral")] + Coral, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="echo")] + Echo, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="sage")] + Sage, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="shimmer")] + Shimmer, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="verse")] + Verse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionVoiceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionVoice value) + { + return value switch + { + RealtimeSessionVoice.Alloy => "alloy", + RealtimeSessionVoice.Ash => "ash", + RealtimeSessionVoice.Ballad => "ballad", + RealtimeSessionVoice.Coral => "coral", + RealtimeSessionVoice.Echo => "echo", + RealtimeSessionVoice.Sage => "sage", + RealtimeSessionVoice.Shimmer => "shimmer", + RealtimeSessionVoice.Verse => "verse", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionVoice? ToEnum(string value) + { + return value switch + { + "alloy" => RealtimeSessionVoice.Alloy, + "ash" => RealtimeSessionVoice.Ash, + "ballad" => RealtimeSessionVoice.Ballad, + "coral" => RealtimeSessionVoice.Coral, + "echo" => RealtimeSessionVoice.Echo, + "sage" => RealtimeSessionVoice.Sage, + "shimmer" => RealtimeSessionVoice.Shimmer, + "verse" => RealtimeSessionVoice.Verse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResponseFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResponseFormat.g.verified.cs index 50821a2646..68abbc4443 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResponseFormat.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResponseFormat.g.verified.cs @@ -6,9 +6,9 @@ namespace G { /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
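Editor's note: the two `response_format` shapes described above are easy to mix up, so here is a small sketch of the raw payload fragments. It uses plain Newtonsoft `JObject`s rather than the generated union type, and the schema is a made-up example; remember that JSON mode still requires an explicit "respond in JSON" instruction in a system or user message.

using Newtonsoft.Json.Linq;

// JSON mode: the response is valid JSON, but you must still ask for JSON in a message.
var jsonMode = new JObject { ["type"] = "json_object" };

// Structured Outputs: the response is constrained to the supplied schema (illustrative schema).
var structuredOutputs = new JObject
{
    ["type"] = "json_schema",
    ["json_schema"] = new JObject
    {
        ["name"] = "short_answer",
        ["schema"] = new JObject
        {
            ["type"] = "object",
            ["properties"] = new JObject { ["answer"] = new JObject { ["type"] = "string" } },
            ["required"] = new JArray("answer")
        }
    }
};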
public readonly partial struct ResponseFormat : global::System.IEquatable diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResultItem.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResultItem.Json.g.verified.cs new file mode 100644 index 0000000000..c44c774310 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResultItem.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.ResultItem.Json.g.cs +#nullable enable + +namespace G +{ + public readonly partial struct ResultItem + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ResultItem? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResultItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResultItem.g.verified.cs new file mode 100644 index 0000000000..6d9bc3edd0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ResultItem.g.verified.cs @@ -0,0 +1,580 @@ +//HintName: G.Models.ResultItem.g.cs +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace G +{ + /// + /// + /// + public readonly partial struct ResultItem : global::System.IEquatable + { + /// + /// + /// + public global::G.UsageTimeBucketResultItemDiscriminatorObject? Object { get; } + + /// + /// The aggregated completions usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageCompletionsResult? OrganizationUsageCompletionsResult { get; init; } +#else + public global::G.UsageCompletionsResult? OrganizationUsageCompletionsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageCompletionsResult))] +#endif + public bool IsOrganizationUsageCompletionsResult => OrganizationUsageCompletionsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageCompletionsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageCompletionsResult?(ResultItem @this) => @this.OrganizationUsageCompletionsResult; + + /// + /// + /// + public ResultItem(global::G.UsageCompletionsResult? value) + { + OrganizationUsageCompletionsResult = value; + } + + /// + /// The aggregated embeddings usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageEmbeddingsResult? OrganizationUsageEmbeddingsResult { get; init; } +#else + public global::G.UsageEmbeddingsResult? OrganizationUsageEmbeddingsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageEmbeddingsResult))] +#endif + public bool IsOrganizationUsageEmbeddingsResult => OrganizationUsageEmbeddingsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageEmbeddingsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageEmbeddingsResult?(ResultItem @this) => @this.OrganizationUsageEmbeddingsResult; + + /// + /// + /// + public ResultItem(global::G.UsageEmbeddingsResult? value) + { + OrganizationUsageEmbeddingsResult = value; + } + + /// + /// The aggregated moderations usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageModerationsResult? OrganizationUsageModerationsResult { get; init; } +#else + public global::G.UsageModerationsResult? 
OrganizationUsageModerationsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageModerationsResult))] +#endif + public bool IsOrganizationUsageModerationsResult => OrganizationUsageModerationsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageModerationsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageModerationsResult?(ResultItem @this) => @this.OrganizationUsageModerationsResult; + + /// + /// + /// + public ResultItem(global::G.UsageModerationsResult? value) + { + OrganizationUsageModerationsResult = value; + } + + /// + /// The aggregated images usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageImagesResult? OrganizationUsageImagesResult { get; init; } +#else + public global::G.UsageImagesResult? OrganizationUsageImagesResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageImagesResult))] +#endif + public bool IsOrganizationUsageImagesResult => OrganizationUsageImagesResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageImagesResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageImagesResult?(ResultItem @this) => @this.OrganizationUsageImagesResult; + + /// + /// + /// + public ResultItem(global::G.UsageImagesResult? value) + { + OrganizationUsageImagesResult = value; + } + + /// + /// The aggregated audio speeches usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageAudioSpeechesResult? OrganizationUsageAudioSpeechesResult { get; init; } +#else + public global::G.UsageAudioSpeechesResult? OrganizationUsageAudioSpeechesResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageAudioSpeechesResult))] +#endif + public bool IsOrganizationUsageAudioSpeechesResult => OrganizationUsageAudioSpeechesResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageAudioSpeechesResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageAudioSpeechesResult?(ResultItem @this) => @this.OrganizationUsageAudioSpeechesResult; + + /// + /// + /// + public ResultItem(global::G.UsageAudioSpeechesResult? value) + { + OrganizationUsageAudioSpeechesResult = value; + } + + /// + /// The aggregated audio transcriptions usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageAudioTranscriptionsResult? OrganizationUsageAudioTranscriptionsResult { get; init; } +#else + public global::G.UsageAudioTranscriptionsResult? 
OrganizationUsageAudioTranscriptionsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageAudioTranscriptionsResult))] +#endif + public bool IsOrganizationUsageAudioTranscriptionsResult => OrganizationUsageAudioTranscriptionsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageAudioTranscriptionsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageAudioTranscriptionsResult?(ResultItem @this) => @this.OrganizationUsageAudioTranscriptionsResult; + + /// + /// + /// + public ResultItem(global::G.UsageAudioTranscriptionsResult? value) + { + OrganizationUsageAudioTranscriptionsResult = value; + } + + /// + /// The aggregated vector stores usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageVectorStoresResult? OrganizationUsageVectorStoresResult { get; init; } +#else + public global::G.UsageVectorStoresResult? OrganizationUsageVectorStoresResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageVectorStoresResult))] +#endif + public bool IsOrganizationUsageVectorStoresResult => OrganizationUsageVectorStoresResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageVectorStoresResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageVectorStoresResult?(ResultItem @this) => @this.OrganizationUsageVectorStoresResult; + + /// + /// + /// + public ResultItem(global::G.UsageVectorStoresResult? value) + { + OrganizationUsageVectorStoresResult = value; + } + + /// + /// The aggregated code interpreter sessions usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageCodeInterpreterSessionsResult? OrganizationUsageCodeInterpreterSessionsResult { get; init; } +#else + public global::G.UsageCodeInterpreterSessionsResult? OrganizationUsageCodeInterpreterSessionsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageCodeInterpreterSessionsResult))] +#endif + public bool IsOrganizationUsageCodeInterpreterSessionsResult => OrganizationUsageCodeInterpreterSessionsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageCodeInterpreterSessionsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageCodeInterpreterSessionsResult?(ResultItem @this) => @this.OrganizationUsageCodeInterpreterSessionsResult; + + /// + /// + /// + public ResultItem(global::G.UsageCodeInterpreterSessionsResult? value) + { + OrganizationUsageCodeInterpreterSessionsResult = value; + } + + /// + /// The aggregated costs details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.CostsResult? OrganizationCostsResult { get; init; } +#else + public global::G.CostsResult? 
OrganizationCostsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationCostsResult))] +#endif + public bool IsOrganizationCostsResult => OrganizationCostsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.CostsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.CostsResult?(ResultItem @this) => @this.OrganizationCostsResult; + + /// + /// + /// + public ResultItem(global::G.CostsResult? value) + { + OrganizationCostsResult = value; + } + + /// + /// + /// + public ResultItem( + global::G.UsageTimeBucketResultItemDiscriminatorObject? @object, + global::G.UsageCompletionsResult? organizationUsageCompletionsResult, + global::G.UsageEmbeddingsResult? organizationUsageEmbeddingsResult, + global::G.UsageModerationsResult? organizationUsageModerationsResult, + global::G.UsageImagesResult? organizationUsageImagesResult, + global::G.UsageAudioSpeechesResult? organizationUsageAudioSpeechesResult, + global::G.UsageAudioTranscriptionsResult? organizationUsageAudioTranscriptionsResult, + global::G.UsageVectorStoresResult? organizationUsageVectorStoresResult, + global::G.UsageCodeInterpreterSessionsResult? organizationUsageCodeInterpreterSessionsResult, + global::G.CostsResult? organizationCostsResult + ) + { + Object = @object; + + OrganizationUsageCompletionsResult = organizationUsageCompletionsResult; + OrganizationUsageEmbeddingsResult = organizationUsageEmbeddingsResult; + OrganizationUsageModerationsResult = organizationUsageModerationsResult; + OrganizationUsageImagesResult = organizationUsageImagesResult; + OrganizationUsageAudioSpeechesResult = organizationUsageAudioSpeechesResult; + OrganizationUsageAudioTranscriptionsResult = organizationUsageAudioTranscriptionsResult; + OrganizationUsageVectorStoresResult = organizationUsageVectorStoresResult; + OrganizationUsageCodeInterpreterSessionsResult = organizationUsageCodeInterpreterSessionsResult; + OrganizationCostsResult = organizationCostsResult; + } + + /// + /// + /// + public object? Object1 => + OrganizationCostsResult as object ?? + OrganizationUsageCodeInterpreterSessionsResult as object ?? + OrganizationUsageVectorStoresResult as object ?? + OrganizationUsageAudioTranscriptionsResult as object ?? + OrganizationUsageAudioSpeechesResult as object ?? + OrganizationUsageImagesResult as object ?? + OrganizationUsageModerationsResult as object ?? + OrganizationUsageEmbeddingsResult as object ?? 
+ OrganizationUsageCompletionsResult as object + ; + + /// + /// + /// + public bool Validate() + { + return IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && IsOrganizationCostsResult; + } + + /// + /// + /// + public TResult? Match( + global::System.Func? 
organizationUsageCompletionsResult = null, + global::System.Func? organizationUsageEmbeddingsResult = null, + global::System.Func? organizationUsageModerationsResult = null, + global::System.Func? organizationUsageImagesResult = null, + global::System.Func? organizationUsageAudioSpeechesResult = null, + global::System.Func? organizationUsageAudioTranscriptionsResult = null, + global::System.Func? organizationUsageVectorStoresResult = null, + global::System.Func? organizationUsageCodeInterpreterSessionsResult = null, + global::System.Func? organizationCostsResult = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsOrganizationUsageCompletionsResult && organizationUsageCompletionsResult != null) + { + return organizationUsageCompletionsResult(OrganizationUsageCompletionsResult!); + } + else if (IsOrganizationUsageEmbeddingsResult && organizationUsageEmbeddingsResult != null) + { + return organizationUsageEmbeddingsResult(OrganizationUsageEmbeddingsResult!); + } + else if (IsOrganizationUsageModerationsResult && organizationUsageModerationsResult != null) + { + return organizationUsageModerationsResult(OrganizationUsageModerationsResult!); + } + else if (IsOrganizationUsageImagesResult && organizationUsageImagesResult != null) + { + return organizationUsageImagesResult(OrganizationUsageImagesResult!); + } + else if (IsOrganizationUsageAudioSpeechesResult && organizationUsageAudioSpeechesResult != null) + { + return organizationUsageAudioSpeechesResult(OrganizationUsageAudioSpeechesResult!); + } + else if (IsOrganizationUsageAudioTranscriptionsResult && organizationUsageAudioTranscriptionsResult != null) + { + return organizationUsageAudioTranscriptionsResult(OrganizationUsageAudioTranscriptionsResult!); + } + else if (IsOrganizationUsageVectorStoresResult && organizationUsageVectorStoresResult != null) + { + return organizationUsageVectorStoresResult(OrganizationUsageVectorStoresResult!); + } + else if (IsOrganizationUsageCodeInterpreterSessionsResult && organizationUsageCodeInterpreterSessionsResult != null) + { + return organizationUsageCodeInterpreterSessionsResult(OrganizationUsageCodeInterpreterSessionsResult!); + } + else if (IsOrganizationCostsResult && organizationCostsResult != null) + { + return organizationCostsResult(OrganizationCostsResult!); + } + + return default(TResult); + } + + /// + /// + /// + public void Match( + global::System.Action? organizationUsageCompletionsResult = null, + global::System.Action? organizationUsageEmbeddingsResult = null, + global::System.Action? organizationUsageModerationsResult = null, + global::System.Action? organizationUsageImagesResult = null, + global::System.Action? organizationUsageAudioSpeechesResult = null, + global::System.Action? organizationUsageAudioTranscriptionsResult = null, + global::System.Action? organizationUsageVectorStoresResult = null, + global::System.Action? organizationUsageCodeInterpreterSessionsResult = null, + global::System.Action? 
organizationCostsResult = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsOrganizationUsageCompletionsResult) + { + organizationUsageCompletionsResult?.Invoke(OrganizationUsageCompletionsResult!); + } + else if (IsOrganizationUsageEmbeddingsResult) + { + organizationUsageEmbeddingsResult?.Invoke(OrganizationUsageEmbeddingsResult!); + } + else if (IsOrganizationUsageModerationsResult) + { + organizationUsageModerationsResult?.Invoke(OrganizationUsageModerationsResult!); + } + else if (IsOrganizationUsageImagesResult) + { + organizationUsageImagesResult?.Invoke(OrganizationUsageImagesResult!); + } + else if (IsOrganizationUsageAudioSpeechesResult) + { + organizationUsageAudioSpeechesResult?.Invoke(OrganizationUsageAudioSpeechesResult!); + } + else if (IsOrganizationUsageAudioTranscriptionsResult) + { + organizationUsageAudioTranscriptionsResult?.Invoke(OrganizationUsageAudioTranscriptionsResult!); + } + else if (IsOrganizationUsageVectorStoresResult) + { + organizationUsageVectorStoresResult?.Invoke(OrganizationUsageVectorStoresResult!); + } + else if (IsOrganizationUsageCodeInterpreterSessionsResult) + { + organizationUsageCodeInterpreterSessionsResult?.Invoke(OrganizationUsageCodeInterpreterSessionsResult!); + } + else if (IsOrganizationCostsResult) + { + organizationCostsResult?.Invoke(OrganizationCostsResult!); + } + } + + /// + /// + /// + public override int GetHashCode() + { + var fields = new object?[] + { + OrganizationUsageCompletionsResult, + typeof(global::G.UsageCompletionsResult), + OrganizationUsageEmbeddingsResult, + typeof(global::G.UsageEmbeddingsResult), + OrganizationUsageModerationsResult, + typeof(global::G.UsageModerationsResult), + OrganizationUsageImagesResult, + typeof(global::G.UsageImagesResult), + OrganizationUsageAudioSpeechesResult, + typeof(global::G.UsageAudioSpeechesResult), + OrganizationUsageAudioTranscriptionsResult, + typeof(global::G.UsageAudioTranscriptionsResult), + OrganizationUsageVectorStoresResult, + typeof(global::G.UsageVectorStoresResult), + OrganizationUsageCodeInterpreterSessionsResult, + typeof(global::G.UsageCodeInterpreterSessionsResult), + OrganizationCostsResult, + typeof(global::G.CostsResult), + }; + const int offset = unchecked((int)2166136261); + const int prime = 16777619; + static int HashCodeAggregator(int hashCode, object? value) => value == null + ? 
(hashCode ^ 0) * prime + : (hashCode ^ value.GetHashCode()) * prime; + + return global::System.Linq.Enumerable.Aggregate(fields, offset, HashCodeAggregator); + } + + /// + /// + /// + public bool Equals(ResultItem other) + { + return + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageCompletionsResult, other.OrganizationUsageCompletionsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageEmbeddingsResult, other.OrganizationUsageEmbeddingsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageModerationsResult, other.OrganizationUsageModerationsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageImagesResult, other.OrganizationUsageImagesResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageAudioSpeechesResult, other.OrganizationUsageAudioSpeechesResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageAudioTranscriptionsResult, other.OrganizationUsageAudioTranscriptionsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageVectorStoresResult, other.OrganizationUsageVectorStoresResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageCodeInterpreterSessionsResult, other.OrganizationUsageCodeInterpreterSessionsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationCostsResult, other.OrganizationCostsResult) + ; + } + + /// + /// + /// + public static bool operator ==(ResultItem obj1, ResultItem obj2) + { + return global::System.Collections.Generic.EqualityComparer.Default.Equals(obj1, obj2); + } + + /// + /// + /// + public static bool operator !=(ResultItem obj1, ResultItem obj2) + { + return !(obj1 == obj2); + } + + /// + /// + /// + public override bool Equals(object? obj) + { + return obj is ResultItem o && Equals(o); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunObject.g.verified.cs index bdfad59c00..42d3316865 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunObject.g.verified.cs @@ -106,13 +106,14 @@ public sealed partial class RunObject public string Instructions { get; set; } = default!; /// - /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.
+ /// Default Value: [] ///
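Editor's note on the `ResultItem` union added a little earlier in this diff: it is the type callers see when walking usage and cost time buckets, and the generated `Is*` flags and `Match` overloads are the intended way to branch on whichever shape a bucket holds. A hedged sketch; `Describe` and its labels are hypothetical:

using G;

// Summarize one usage/cost bucket entry without manual type checks.
static string Describe(ResultItem item)
{
    if (item.IsOrganizationCostsResult)
    {
        return "costs bucket";
    }

    // Match invokes the handler for whichever alternative is populated;
    // alternatives without a handler fall through to the null default.
    return item.Match(
        organizationUsageCompletionsResult: r => "completions usage",
        organizationUsageEmbeddingsResult: r => "embeddings usage",
        organizationUsageImagesResult: r => "images usage")
        ?? "other usage type";
}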
[global::Newtonsoft.Json.JsonProperty("tools", Required = global::Newtonsoft.Json.Required.Always)] - public global::System.Collections.Generic.IList Tools { get; set; } = default!; + public global::System.Collections.Generic.IList Tools { get; set; } = default!; /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata", Required = global::Newtonsoft.Json.Required.Always)] public object? Metadata { get; set; } = default!; @@ -164,15 +165,17 @@ public sealed partial class RunObject public global::G.AssistantsApiToolChoiceOption ToolChoice { get; set; } = default!; /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true
 /// </summary>
+ /// true [global::Newtonsoft.Json.JsonProperty("parallel_tool_calls", Required = global::Newtonsoft.Json.Required.Always)] - public bool? ParallelToolCalls { get; set; } = default!; + public bool ParallelToolCalls { get; set; } = default!; /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
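As an illustrative aside (not part of the generated snapshot), the two `response_format` shapes quoted in the summary above could be built with Newtonsoft.Json, the serializer these snapshots target; the schema body below is an invented placeholder, not taken from the spec.

using Newtonsoft.Json.Linq;

static class ResponseFormatShapes
{
    // JSON mode: { "type": "json_object" }. Per the note above, the prompt must
    // also ask for JSON explicitly via a system or user message.
    public static JObject JsonMode() => new JObject
    {
        ["type"] = "json_object",
    };

    // Structured Outputs: { "type": "json_schema", "json_schema": { ... } }.
    // The schema content here is a placeholder for illustration only.
    public static JObject StructuredOutputs() => new JObject
    {
        ["type"] = "json_schema",
        ["json_schema"] = new JObject
        {
            ["name"] = "example_response",
            ["schema"] = new JObject
            {
                ["type"] = "object",
                ["properties"] = new JObject
                {
                    ["answer"] = new JObject { ["type"] = "string" },
                },
                ["required"] = new JArray("answer"),
            },
        },
    };
}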
[global::Newtonsoft.Json.JsonProperty("response_format", Required = global::Newtonsoft.Json.Required.Always)] @@ -236,10 +239,11 @@ public sealed partial class RunObject /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run. /// /// - /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.
+ /// Default Value: [] /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -267,12 +271,13 @@ public sealed partial class RunObject /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// public RunObject( @@ -291,14 +296,14 @@ public RunObject( global::G.RunObjectIncompleteDetails? incompleteDetails, string model, string instructions, - global::System.Collections.Generic.IList tools, + global::System.Collections.Generic.IList tools, object? metadata, global::G.RunCompletionUsage? usage, int? maxPromptTokens, int? maxCompletionTokens, global::G.TruncationObject truncationStrategy, global::G.AssistantsApiToolChoiceOption toolChoice, - bool? parallelToolCalls, + bool parallelToolCalls, global::G.AssistantsApiResponseFormatOption responseFormat, global::G.RunObjectObject @object, double? temperature, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunObjectMetadata.g.verified.cs index e236246d75..bddb0ca2dc 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class RunObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDeltaStepDetailsToolCallsObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDeltaStepDetailsToolCallsObject.g.verified.cs index e7ffe2535d..81e35a4ae8 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDeltaStepDetailsToolCallsObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDeltaStepDetailsToolCallsObject.g.verified.cs @@ -19,7 +19,7 @@ public sealed partial class RunStepDeltaStepDetailsToolCallsObject /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. ///
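The `tool_calls` arrays described above mix `code_interpreter`, `file_search`, and `function` entries. As a rough, hypothetical sketch of consuming that shape from raw JSON (the generated union types for these entries appear further down in this diff; nothing beyond the three `type` values is taken from it), dispatching on the `type` discriminator might look like:

using Newtonsoft.Json.Linq;

static class ToolCallDispatcher
{
    // Route each raw tool call by its "type" value. The three values come from
    // the doc comment above; everything else here is illustrative.
    public static void Handle(JArray toolCalls)
    {
        foreach (JToken call in toolCalls)
        {
            switch ((string?)call["type"])
            {
                case "code_interpreter":
                    // handle the Code Interpreter details of this step
                    break;
                case "file_search":
                    // handle file search details (ranking options / results, see the new models below)
                    break;
                case "function":
                    // handle the function call details
                    break;
                default:
                    // unknown tool type: ignore or log
                    break;
            }
        }
    }
}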
[global::Newtonsoft.Json.JsonProperty("tool_calls")] - public global::System.Collections.Generic.IList? ToolCalls { get; set; } + public global::System.Collections.Generic.IList? ToolCalls { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -38,7 +38,7 @@ public sealed partial class RunStepDeltaStepDetailsToolCallsObject /// public RunStepDeltaStepDetailsToolCallsObject( global::G.RunStepDeltaStepDetailsToolCallsObjectType type, - global::System.Collections.Generic.IList? toolCalls) + global::System.Collections.Generic.IList? toolCalls) { this.Type = type; this.ToolCalls = toolCalls; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchObject.g.verified.cs index 63ac188d7c..15df434253 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchObject.g.verified.cs @@ -25,7 +25,7 @@ public sealed partial class RunStepDetailsToolCallsFileSearchObject /// For now, this is always going to be an empty object. /// [global::Newtonsoft.Json.JsonProperty("file_search", Required = global::Newtonsoft.Json.Required.Always)] - public object FileSearch { get; set; } = default!; + public global::G.RunStepDetailsToolCallsFileSearchObjectFileSearch FileSearch { get; set; } = default!; /// /// Additional properties that are not explicitly defined in the schema @@ -47,7 +47,7 @@ public sealed partial class RunStepDetailsToolCallsFileSearchObject /// public RunStepDetailsToolCallsFileSearchObject( string id, - object fileSearch, + global::G.RunStepDetailsToolCallsFileSearchObjectFileSearch fileSearch, global::G.RunStepDetailsToolCallsFileSearchObjectType type) { this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchObjectFileSearch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchObjectFileSearch.g.verified.cs index 3bd45dbae0..1276785cc1 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchObjectFileSearch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchObjectFileSearch.g.verified.cs @@ -9,6 +9,17 @@ namespace G /// public sealed partial class RunStepDetailsToolCallsFileSearchObjectFileSearch { + /// + /// The ranking options for the file search. + /// + [global::Newtonsoft.Json.JsonProperty("ranking_options")] + public global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject? RankingOptions { get; set; } + + /// + /// The results of the file search. + /// + [global::Newtonsoft.Json.JsonProperty("results")] + public global::System.Collections.Generic.IList? Results { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -19,8 +30,24 @@ public sealed partial class RunStepDetailsToolCallsFileSearchObjectFileSearch /// /// Initializes a new instance of the class. /// + /// + /// The ranking options for the file search. + /// + /// + /// The results of the file search. 
+ /// public RunStepDetailsToolCallsFileSearchObjectFileSearch( - ) + global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + global::System.Collections.Generic.IList? results) + { + this.RankingOptions = rankingOptions; + this.Results = results; + } + + /// + /// Initializes a new instance of the class. + /// + public RunStepDetailsToolCallsFileSearchObjectFileSearch() { } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.Json.g.verified.cs new file mode 100644 index 0000000000..4fab5a69a5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RunStepDetailsToolCallsFileSearchRankingOptionsObject + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.g.verified.cs new file mode 100644 index 0000000000..7b3a3ce6ce --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// The ranking options for the file search. + /// + public sealed partial class RunStepDetailsToolCallsFileSearchRankingOptionsObject + { + /// + /// The ranker used for the file search. + /// + [global::Newtonsoft.Json.JsonProperty("ranker")] + public global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker Ranker { get; set; } + + /// + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// + [global::Newtonsoft.Json.JsonProperty("score_threshold", Required = global::Newtonsoft.Json.Required.Always)] + public double ScoreThreshold { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The ranker used for the file search. + /// + /// + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// + public RunStepDetailsToolCallsFileSearchRankingOptionsObject( + double scoreThreshold, + global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker ranker) + { + this.ScoreThreshold = scoreThreshold; + this.Ranker = ranker; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RunStepDetailsToolCallsFileSearchRankingOptionsObject() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs new file mode 100644 index 0000000000..d94baf9c57 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.cs + +#nullable enable + +namespace G +{ + /// + /// The ranker used for the file search. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="default_2024_08_21")] + Default20240821, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker value) + { + return value switch + { + RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.Default20240821 => "default_2024_08_21", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker? ToEnum(string value) + { + return value switch + { + "default_2024_08_21" => RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.Default20240821, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.Json.g.verified.cs new file mode 100644 index 0000000000..cbb1ac3703 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObject.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RunStepDetailsToolCallsFileSearchResultObject + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RunStepDetailsToolCallsFileSearchResultObject? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.g.verified.cs new file mode 100644 index 0000000000..fb7e0c5336 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// A result instance of the file search. + /// + public sealed partial class RunStepDetailsToolCallsFileSearchResultObject + { + /// + /// The ID of the file that result was found in. + /// + [global::Newtonsoft.Json.JsonProperty("file_id", Required = global::Newtonsoft.Json.Required.Always)] + public string FileId { get; set; } = default!; + + /// + /// The name of the file that result was found in. 
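Each new model above also gains a `.Json` partial exposing `ToJson`, `FromJson`, and `FromJsonStreamAsync`. A minimal round-trip sketch, assuming the generated `G` namespace is referenced and using the ranking-options constructor shown earlier in this diff (the values are arbitrary):

using System;
using G;

static class JsonRoundTripExample
{
    static void Main()
    {
        // Built with the generated (scoreThreshold, ranker) constructor.
        var options = new RunStepDetailsToolCallsFileSearchRankingOptionsObject(
            scoreThreshold: 0.5,
            ranker: RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.Default20240821);

        // Serialize via the generated Newtonsoft-based partial.
        string json = options.ToJson();

        // FromJson returns null when the payload cannot be deserialized.
        var restored = RunStepDetailsToolCallsFileSearchRankingOptionsObject.FromJson(json);

        Console.WriteLine(restored?.ScoreThreshold); // 0.5
    }
}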
+ /// + [global::Newtonsoft.Json.JsonProperty("file_name", Required = global::Newtonsoft.Json.Required.Always)] + public string FileName { get; set; } = default!; + + /// + /// The score of the result. All values must be a floating point number between 0 and 1. + /// + [global::Newtonsoft.Json.JsonProperty("score", Required = global::Newtonsoft.Json.Required.Always)] + public double Score { get; set; } = default!; + + /// + /// The content of the result that was found. The content is only included if requested via the include query parameter. + /// + [global::Newtonsoft.Json.JsonProperty("content")] + public global::System.Collections.Generic.IList? Content { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The ID of the file that result was found in. + /// + /// + /// The name of the file that result was found in. + /// + /// + /// The score of the result. All values must be a floating point number between 0 and 1. + /// + /// + /// The content of the result that was found. The content is only included if requested via the include query parameter. + /// + public RunStepDetailsToolCallsFileSearchResultObject( + string fileId, + string fileName, + double score, + global::System.Collections.Generic.IList? content) + { + this.FileId = fileId ?? throw new global::System.ArgumentNullException(nameof(fileId)); + this.FileName = fileName ?? throw new global::System.ArgumentNullException(nameof(fileName)); + this.Score = score; + this.Content = content; + } + + /// + /// Initializes a new instance of the class. + /// + public RunStepDetailsToolCallsFileSearchResultObject() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.Json.g.verified.cs new file mode 100644 index 0000000000..265dd400c3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RunStepDetailsToolCallsFileSearchResultObjectContentItem + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItem? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.g.verified.cs new file mode 100644 index 0000000000..4c039523aa --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class RunStepDetailsToolCallsFileSearchResultObjectContentItem + { + /// + /// The type of the content. + /// + [global::Newtonsoft.Json.JsonProperty("type")] + public global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType? Type { get; set; } + + /// + /// The text content of the file. + /// + [global::Newtonsoft.Json.JsonProperty("text")] + public string? 
Text { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the content. + /// + /// + /// The text content of the file. + /// + public RunStepDetailsToolCallsFileSearchResultObjectContentItem( + global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType? type, + string? text) + { + this.Type = type; + this.Text = text; + } + + /// + /// Initializes a new instance of the class. + /// + public RunStepDetailsToolCallsFileSearchResultObjectContentItem() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs new file mode 100644 index 0000000000..b3e4ea2df8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the content. + /// + [global::System.Runtime.Serialization.DataContract] + public enum RunStepDetailsToolCallsFileSearchResultObjectContentItemType + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="text")] + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RunStepDetailsToolCallsFileSearchResultObjectContentItemType value) + { + return value switch + { + RunStepDetailsToolCallsFileSearchResultObjectContentItemType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RunStepDetailsToolCallsFileSearchResultObjectContentItemType? ToEnum(string value) + { + return value switch + { + "text" => RunStepDetailsToolCallsFileSearchResultObjectContentItemType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsObject.g.verified.cs index a150d2b4f0..8a5ca88de0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepDetailsToolCallsObject.g.verified.cs @@ -19,7 +19,7 @@ public sealed partial class RunStepDetailsToolCallsObject /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. 
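Taken together, the new file-search step-detail models above can be assembled as in the hedged sketch below. All literal values are invented, and the element types of the two list properties are assumptions (the generic arguments are not visible in this diff); only constructors and enum helpers shown above are used.

using System;
using System.Collections.Generic;
using G;

static class FileSearchDetailsExample
{
    static void Main()
    {
        // One search hit, via the generated (fileId, fileName, score, content) constructor.
        var result = new RunStepDetailsToolCallsFileSearchResultObject(
            fileId: "file-abc123",   // invented ID, for illustration only
            fileName: "notes.txt",
            score: 0.87,
            content: new List<RunStepDetailsToolCallsFileSearchResultObjectContentItem>
            {
                new RunStepDetailsToolCallsFileSearchResultObjectContentItem(
                    type: RunStepDetailsToolCallsFileSearchResultObjectContentItemType.Text,
                    text: "matched passage ..."),
            });

        // The file_search block now carries ranking options plus the results above.
        var fileSearch = new RunStepDetailsToolCallsFileSearchObjectFileSearch(
            rankingOptions: new RunStepDetailsToolCallsFileSearchRankingOptionsObject(
                scoreThreshold: 0.5,
                ranker: RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.Default20240821),
            results: new List<RunStepDetailsToolCallsFileSearchResultObject> { result });

        // The generated enum extensions give reflection-free string round-trips.
        string rankerString = RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.Default20240821.ToValueString();
        var parsed = RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerExtensions.ToEnum(rankerString);

        Console.WriteLine($"{fileSearch.Results?.Count} result(s), ranker = {rankerString}, parsed back = {parsed}");
    }
}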
/// [global::Newtonsoft.Json.JsonProperty("tool_calls", Required = global::Newtonsoft.Json.Required.Always)] - public global::System.Collections.Generic.IList ToolCalls { get; set; } = default!; + public global::System.Collections.Generic.IList ToolCalls { get; set; } = default!; /// /// Additional properties that are not explicitly defined in the schema @@ -37,7 +37,7 @@ public sealed partial class RunStepDetailsToolCallsObject /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. /// public RunStepDetailsToolCallsObject( - global::System.Collections.Generic.IList toolCalls, + global::System.Collections.Generic.IList toolCalls, global::G.RunStepDetailsToolCallsObjectType type) { this.ToolCalls = toolCalls ?? throw new global::System.ArgumentNullException(nameof(toolCalls)); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepObject.g.verified.cs index 423853882b..e55b3571c8 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepObject.g.verified.cs @@ -94,7 +94,7 @@ public sealed partial class RunStepObject public global::System.DateTimeOffset? CompletedAt { get; set; } = default!; /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata", Required = global::Newtonsoft.Json.Required.Always)] public object? Metadata { get; set; } = default!; @@ -157,7 +157,7 @@ public sealed partial class RunStepObject /// The Unix timestamp (in seconds) for when the run step completed. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. 
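The `metadata` doc comments repeated throughout this diff all state the same limits: at most 16 key-value pairs, keys up to 64 characters, values up to 512 characters. A small, hypothetical pre-flight check (not part of the generated code) could enforce them before sending a request:

using System;
using System.Collections.Generic;
using System.Linq;

static class MetadataGuard
{
    // Limits quoted from the doc comments: 16 pairs, 64-character keys, 512-character values.
    private const int MaxPairs = 16;
    private const int MaxKeyLength = 64;
    private const int MaxValueLength = 512;

    public static void Validate(IReadOnlyDictionary<string, string> metadata)
    {
        if (metadata.Count > MaxPairs)
            throw new ArgumentException($"Metadata may contain at most {MaxPairs} pairs.");

        string? badKey = metadata.Keys.FirstOrDefault(k => k.Length > MaxKeyLength);
        if (badKey != null)
            throw new ArgumentException($"Metadata key '{badKey}' exceeds {MaxKeyLength} characters.");

        var badPair = metadata.FirstOrDefault(kv => kv.Value.Length > MaxValueLength);
        if (badPair.Key != null)
            throw new ArgumentException($"Metadata value for key '{badPair.Key}' exceeds {MaxValueLength} characters.");
    }
}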
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepObjectMetadata.g.verified.cs index c5580148dd..108a340131 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.RunStepObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class RunStepObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObject.g.verified.cs index 9807d15e8e..fdfcf83859 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObject.g.verified.cs @@ -34,7 +34,7 @@ public sealed partial class ThreadObject public global::G.ThreadObjectToolResources? ToolResources { get; set; } = default!; /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata", Required = global::Newtonsoft.Json.Required.Always)] public object? Metadata { get; set; } = default!; @@ -61,7 +61,7 @@ public sealed partial class ThreadObject /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public ThreadObject( string id, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObjectMetadata.g.verified.cs index b8f6d5f8e5..3d878b2f4e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ThreadObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObjectToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObjectToolResourcesCodeInterpreter.g.verified.cs index 2b4a179a9c..f89ef7437b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObjectToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ThreadObjectToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class ThreadObjectToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: []
 /// </summary>
[global::Newtonsoft.Json.JsonProperty("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class ThreadObjectToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// public ThreadObjectToolResourcesCodeInterpreter( global::System.Collections.Generic.IList? fileIds) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolCallsItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolCallsItem.g.verified.cs index 1743c35d5d..88f8f3b4d0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolCallsItem.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolCallsItem.g.verified.cs @@ -13,15 +13,15 @@ namespace G /// /// /// - public global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? Type { get; } + public global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? Type { get; } /// /// Details of the Code Interpreter tool call the run step was involved in. /// #if NET6_0_OR_GREATER - public global::G.RunStepDetailsToolCallsCodeObject? CodeInterpreter { get; init; } + public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? CodeInterpreter { get; init; } #else - public global::G.RunStepDetailsToolCallsCodeObject? CodeInterpreter { get; } + public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? CodeInterpreter { get; } #endif /// @@ -35,17 +35,17 @@ namespace G /// /// /// - public static implicit operator ToolCallsItem(global::G.RunStepDetailsToolCallsCodeObject value) => new ToolCallsItem(value); + public static implicit operator ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsCodeObject value) => new ToolCallsItem(value); /// /// /// - public static implicit operator global::G.RunStepDetailsToolCallsCodeObject?(ToolCallsItem @this) => @this.CodeInterpreter; + public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsCodeObject?(ToolCallsItem @this) => @this.CodeInterpreter; /// /// /// - public ToolCallsItem(global::G.RunStepDetailsToolCallsCodeObject? value) + public ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsCodeObject? value) { CodeInterpreter = value; } @@ -54,9 +54,9 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsCodeObject? value) /// /// #if NET6_0_OR_GREATER - public global::G.RunStepDetailsToolCallsFileSearchObject? FileSearch { get; init; } + public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? FileSearch { get; init; } #else - public global::G.RunStepDetailsToolCallsFileSearchObject? FileSearch { get; } + public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? FileSearch { get; } #endif /// @@ -70,17 +70,17 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsCodeObject? value) /// /// /// - public static implicit operator ToolCallsItem(global::G.RunStepDetailsToolCallsFileSearchObject value) => new ToolCallsItem(value); + public static implicit operator ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject value) => new ToolCallsItem(value); /// /// /// - public static implicit operator global::G.RunStepDetailsToolCallsFileSearchObject?(ToolCallsItem @this) => @this.FileSearch; + public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject?(ToolCallsItem @this) => @this.FileSearch; /// /// /// - public ToolCallsItem(global::G.RunStepDetailsToolCallsFileSearchObject? value) + public ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? value) { FileSearch = value; } @@ -89,9 +89,9 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsFileSearchObject? 
value) /// /// #if NET6_0_OR_GREATER - public global::G.RunStepDetailsToolCallsFunctionObject? Function { get; init; } + public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Function { get; init; } #else - public global::G.RunStepDetailsToolCallsFunctionObject? Function { get; } + public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Function { get; } #endif /// @@ -105,17 +105,17 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsFileSearchObject? value) /// /// /// - public static implicit operator ToolCallsItem(global::G.RunStepDetailsToolCallsFunctionObject value) => new ToolCallsItem(value); + public static implicit operator ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject value) => new ToolCallsItem(value); /// /// /// - public static implicit operator global::G.RunStepDetailsToolCallsFunctionObject?(ToolCallsItem @this) => @this.Function; + public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsFunctionObject?(ToolCallsItem @this) => @this.Function; /// /// /// - public ToolCallsItem(global::G.RunStepDetailsToolCallsFunctionObject? value) + public ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? value) { Function = value; } @@ -124,10 +124,10 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsFunctionObject? value) /// /// public ToolCallsItem( - global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? type, - global::G.RunStepDetailsToolCallsCodeObject? codeInterpreter, - global::G.RunStepDetailsToolCallsFileSearchObject? fileSearch, - global::G.RunStepDetailsToolCallsFunctionObject? function + global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? type, + global::G.RunStepDeltaStepDetailsToolCallsCodeObject? codeInterpreter, + global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? fileSearch, + global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? function ) { Type = type; @@ -158,9 +158,9 @@ public bool Validate() /// ///
public TResult? Match( - global::System.Func? codeInterpreter = null, - global::System.Func? fileSearch = null, - global::System.Func? function = null, + global::System.Func? codeInterpreter = null, + global::System.Func? fileSearch = null, + global::System.Func? function = null, bool validate = true) { if (validate) @@ -188,9 +188,9 @@ public bool Validate() /// ///
public void Match( - global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, - global::System.Action? function = null, + global::System.Action? codeInterpreter = null, + global::System.Action? fileSearch = null, + global::System.Action? function = null, bool validate = true) { if (validate) @@ -220,11 +220,11 @@ public override int GetHashCode() var fields = new object?[] { CodeInterpreter, - typeof(global::G.RunStepDetailsToolCallsCodeObject), + typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject), FileSearch, - typeof(global::G.RunStepDetailsToolCallsFileSearchObject), + typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject), Function, - typeof(global::G.RunStepDetailsToolCallsFunctionObject), + typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -241,9 +241,9 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ToolCallsItem other) { return - global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) + global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolCallsItem2.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolCallsItem2.g.verified.cs index 1fd0486eb4..e676762742 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolCallsItem2.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolCallsItem2.g.verified.cs @@ -13,15 +13,15 @@ namespace G /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? Type { get; } + public global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? Type { get; } /// /// Details of the Code Interpreter tool call the run step was involved in. /// #if NET6_0_OR_GREATER - public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? CodeInterpreter { get; init; } + public global::G.RunStepDetailsToolCallsCodeObject? CodeInterpreter { get; init; } #else - public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? CodeInterpreter { get; } + public global::G.RunStepDetailsToolCallsCodeObject? CodeInterpreter { get; } #endif /// @@ -35,17 +35,17 @@ namespace G /// /// /// - public static implicit operator ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsCodeObject value) => new ToolCallsItem2(value); + public static implicit operator ToolCallsItem2(global::G.RunStepDetailsToolCallsCodeObject value) => new ToolCallsItem2(value); /// /// /// - public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsCodeObject?(ToolCallsItem2 @this) => @this.CodeInterpreter; + public static implicit operator global::G.RunStepDetailsToolCallsCodeObject?(ToolCallsItem2 @this) => @this.CodeInterpreter; /// /// /// - public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsCodeObject? 
value) + public ToolCallsItem2(global::G.RunStepDetailsToolCallsCodeObject? value) { CodeInterpreter = value; } @@ -54,9 +54,9 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsCodeObject? valu /// /// #if NET6_0_OR_GREATER - public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? FileSearch { get; init; } + public global::G.RunStepDetailsToolCallsFileSearchObject? FileSearch { get; init; } #else - public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? FileSearch { get; } + public global::G.RunStepDetailsToolCallsFileSearchObject? FileSearch { get; } #endif /// @@ -70,17 +70,17 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsCodeObject? valu /// /// /// - public static implicit operator ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject value) => new ToolCallsItem2(value); + public static implicit operator ToolCallsItem2(global::G.RunStepDetailsToolCallsFileSearchObject value) => new ToolCallsItem2(value); /// /// /// - public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject?(ToolCallsItem2 @this) => @this.FileSearch; + public static implicit operator global::G.RunStepDetailsToolCallsFileSearchObject?(ToolCallsItem2 @this) => @this.FileSearch; /// /// /// - public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? value) + public ToolCallsItem2(global::G.RunStepDetailsToolCallsFileSearchObject? value) { FileSearch = value; } @@ -89,9 +89,9 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject /// /// #if NET6_0_OR_GREATER - public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Function { get; init; } + public global::G.RunStepDetailsToolCallsFunctionObject? Function { get; init; } #else - public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Function { get; } + public global::G.RunStepDetailsToolCallsFunctionObject? Function { get; } #endif /// @@ -105,17 +105,17 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject /// /// /// - public static implicit operator ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject value) => new ToolCallsItem2(value); + public static implicit operator ToolCallsItem2(global::G.RunStepDetailsToolCallsFunctionObject value) => new ToolCallsItem2(value); /// /// /// - public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsFunctionObject?(ToolCallsItem2 @this) => @this.Function; + public static implicit operator global::G.RunStepDetailsToolCallsFunctionObject?(ToolCallsItem2 @this) => @this.Function; /// /// /// - public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? value) + public ToolCallsItem2(global::G.RunStepDetailsToolCallsFunctionObject? value) { Function = value; } @@ -124,10 +124,10 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? /// /// public ToolCallsItem2( - global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? type, - global::G.RunStepDeltaStepDetailsToolCallsCodeObject? codeInterpreter, - global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? fileSearch, - global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? function + global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? type, + global::G.RunStepDetailsToolCallsCodeObject? codeInterpreter, + global::G.RunStepDetailsToolCallsFileSearchObject? fileSearch, + global::G.RunStepDetailsToolCallsFunctionObject? 
function ) { Type = type; @@ -158,9 +158,9 @@ public bool Validate() /// ///
public TResult? Match( - global::System.Func? codeInterpreter = null, - global::System.Func? fileSearch = null, - global::System.Func? function = null, + global::System.Func? codeInterpreter = null, + global::System.Func? fileSearch = null, + global::System.Func? function = null, bool validate = true) { if (validate) @@ -188,9 +188,9 @@ public bool Validate() /// ///
public void Match( - global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, - global::System.Action? function = null, + global::System.Action? codeInterpreter = null, + global::System.Action? fileSearch = null, + global::System.Action? function = null, bool validate = true) { if (validate) @@ -220,11 +220,11 @@ public override int GetHashCode() var fields = new object?[] { CodeInterpreter, - typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject), + typeof(global::G.RunStepDetailsToolCallsCodeObject), FileSearch, - typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject), + typeof(global::G.RunStepDetailsToolCallsFileSearchObject), Function, - typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject), + typeof(global::G.RunStepDetailsToolCallsFunctionObject), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -241,9 +241,9 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ToolCallsItem2 other) { return - global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) + global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem3.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem3.g.verified.cs index 7eb1a57846..62ca0eb1fe 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem3.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem3.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.ModifyAssistantRequestToolDiscriminatorType? Type { get; } + public global::G.CreateMessageRequestAttachmentToolDiscriminatorType? Type { get; } /// /// @@ -54,9 +54,9 @@ public ToolsItem3(global::G.AssistantToolsCode? value) /// /// #if NET6_0_OR_GREATER - public global::G.AssistantToolsFileSearch? FileSearch { get; init; } + public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; init; } #else - public global::G.AssistantToolsFileSearch? FileSearch { get; } + public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; } #endif /// @@ -70,78 +70,40 @@ public ToolsItem3(global::G.AssistantToolsCode? value) /// /// /// - public static implicit operator ToolsItem3(global::G.AssistantToolsFileSearch value) => new ToolsItem3(value); + public static implicit operator ToolsItem3(global::G.AssistantToolsFileSearchTypeOnly value) => new ToolsItem3(value); /// /// /// - public static implicit operator global::G.AssistantToolsFileSearch?(ToolsItem3 @this) => @this.FileSearch; + public static implicit operator global::G.AssistantToolsFileSearchTypeOnly?(ToolsItem3 @this) => @this.FileSearch; /// /// /// - public ToolsItem3(global::G.AssistantToolsFileSearch? value) + public ToolsItem3(global::G.AssistantToolsFileSearchTypeOnly? 
value) { FileSearch = value; } - /// - /// - /// -#if NET6_0_OR_GREATER - public global::G.AssistantToolsFunction? Function { get; init; } -#else - public global::G.AssistantToolsFunction? Function { get; } -#endif - - /// - /// - /// -#if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Function))] -#endif - public bool IsFunction => Function != null; - - /// - /// - /// - public static implicit operator ToolsItem3(global::G.AssistantToolsFunction value) => new ToolsItem3(value); - - /// - /// - /// - public static implicit operator global::G.AssistantToolsFunction?(ToolsItem3 @this) => @this.Function; - - /// - /// - /// - public ToolsItem3(global::G.AssistantToolsFunction? value) - { - Function = value; - } - /// /// /// public ToolsItem3( - global::G.ModifyAssistantRequestToolDiscriminatorType? type, + global::G.CreateMessageRequestAttachmentToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, - global::G.AssistantToolsFileSearch? fileSearch, - global::G.AssistantToolsFunction? function + global::G.AssistantToolsFileSearchTypeOnly? fileSearch ) { Type = type; CodeInterpreter = codeInterpreter; FileSearch = fileSearch; - Function = function; } /// /// /// public object? Object => - Function as object ?? FileSearch as object ?? CodeInterpreter as object ; @@ -151,7 +113,7 @@ CodeInterpreter as object /// public bool Validate() { - return IsCodeInterpreter && !IsFileSearch && !IsFunction || !IsCodeInterpreter && IsFileSearch && !IsFunction || !IsCodeInterpreter && !IsFileSearch && IsFunction; + return IsCodeInterpreter && !IsFileSearch || !IsCodeInterpreter && IsFileSearch; } /// @@ -159,8 +121,7 @@ public bool Validate() /// public TResult? Match( global::System.Func? codeInterpreter = null, - global::System.Func? fileSearch = null, - global::System.Func? function = null, + global::System.Func? fileSearch = null, bool validate = true) { if (validate) @@ -176,10 +137,6 @@ public bool Validate() { return fileSearch(FileSearch!); } - else if (IsFunction && function != null) - { - return function(Function!); - } return default(TResult); } @@ -189,8 +146,7 @@ public bool Validate() ///
public void Match( global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, - global::System.Action? function = null, + global::System.Action? fileSearch = null, bool validate = true) { if (validate) @@ -206,10 +162,6 @@ public void Match( { fileSearch?.Invoke(FileSearch!); } - else if (IsFunction) - { - function?.Invoke(Function!); - } } /// @@ -222,9 +174,7 @@ public override int GetHashCode() CodeInterpreter, typeof(global::G.AssistantToolsCode), FileSearch, - typeof(global::G.AssistantToolsFileSearch), - Function, - typeof(global::G.AssistantToolsFunction), + typeof(global::G.AssistantToolsFileSearchTypeOnly), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -242,8 +192,7 @@ public bool Equals(ToolsItem3 other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem4.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem4.g.verified.cs index d58b82cb8d..a57a960987 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem4.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem4.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.RunObjectToolDiscriminatorType? Type { get; } + public global::G.CreateRunRequestToolDiscriminatorType? Type { get; } /// /// @@ -124,7 +124,7 @@ public ToolsItem4(global::G.AssistantToolsFunction? value) /// /// public ToolsItem4( - global::G.RunObjectToolDiscriminatorType? type, + global::G.CreateRunRequestToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, global::G.AssistantToolsFileSearch? fileSearch, global::G.AssistantToolsFunction? function diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem5.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem5.g.verified.cs index 334f43b10f..18d3b76c38 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem5.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem5.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.CreateRunRequestToolDiscriminatorType? Type { get; } + public global::G.CreateThreadAndRunRequestToolDiscriminatorType? Type { get; } /// /// @@ -124,7 +124,7 @@ public ToolsItem5(global::G.AssistantToolsFunction? value) /// /// public ToolsItem5( - global::G.CreateRunRequestToolDiscriminatorType? type, + global::G.CreateThreadAndRunRequestToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, global::G.AssistantToolsFileSearch? fileSearch, global::G.AssistantToolsFunction? 
function diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem6.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem6.g.verified.cs index 8dc953639a..37d293f114 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem6.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem6.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.CreateThreadAndRunRequestToolDiscriminatorType? Type { get; } + public global::G.MessageObjectAttachmentToolDiscriminatorType? Type { get; } /// /// @@ -54,9 +54,9 @@ public ToolsItem6(global::G.AssistantToolsCode? value) /// /// #if NET6_0_OR_GREATER - public global::G.AssistantToolsFileSearch? FileSearch { get; init; } + public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; init; } #else - public global::G.AssistantToolsFileSearch? FileSearch { get; } + public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; } #endif /// @@ -70,78 +70,40 @@ public ToolsItem6(global::G.AssistantToolsCode? value) /// /// /// - public static implicit operator ToolsItem6(global::G.AssistantToolsFileSearch value) => new ToolsItem6(value); + public static implicit operator ToolsItem6(global::G.AssistantToolsFileSearchTypeOnly value) => new ToolsItem6(value); /// /// /// - public static implicit operator global::G.AssistantToolsFileSearch?(ToolsItem6 @this) => @this.FileSearch; + public static implicit operator global::G.AssistantToolsFileSearchTypeOnly?(ToolsItem6 @this) => @this.FileSearch; /// /// /// - public ToolsItem6(global::G.AssistantToolsFileSearch? value) + public ToolsItem6(global::G.AssistantToolsFileSearchTypeOnly? value) { FileSearch = value; } - /// - /// - /// -#if NET6_0_OR_GREATER - public global::G.AssistantToolsFunction? Function { get; init; } -#else - public global::G.AssistantToolsFunction? Function { get; } -#endif - - /// - /// - /// -#if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Function))] -#endif - public bool IsFunction => Function != null; - - /// - /// - /// - public static implicit operator ToolsItem6(global::G.AssistantToolsFunction value) => new ToolsItem6(value); - - /// - /// - /// - public static implicit operator global::G.AssistantToolsFunction?(ToolsItem6 @this) => @this.Function; - - /// - /// - /// - public ToolsItem6(global::G.AssistantToolsFunction? value) - { - Function = value; - } - /// /// /// public ToolsItem6( - global::G.CreateThreadAndRunRequestToolDiscriminatorType? type, + global::G.MessageObjectAttachmentToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, - global::G.AssistantToolsFileSearch? fileSearch, - global::G.AssistantToolsFunction? function + global::G.AssistantToolsFileSearchTypeOnly? fileSearch ) { Type = type; CodeInterpreter = codeInterpreter; FileSearch = fileSearch; - Function = function; } /// /// /// public object? Object => - Function as object ?? FileSearch as object ?? CodeInterpreter as object ; @@ -151,7 +113,7 @@ CodeInterpreter as object /// public bool Validate() { - return IsCodeInterpreter && !IsFileSearch && !IsFunction || !IsCodeInterpreter && IsFileSearch && !IsFunction || !IsCodeInterpreter && !IsFileSearch && IsFunction; + return IsCodeInterpreter && !IsFileSearch || !IsCodeInterpreter && IsFileSearch; } /// @@ -159,8 +121,7 @@ public bool Validate() /// public TResult? Match( global::System.Func? 
codeInterpreter = null, - global::System.Func? fileSearch = null, - global::System.Func? function = null, + global::System.Func? fileSearch = null, bool validate = true) { if (validate) @@ -176,10 +137,6 @@ public bool Validate() { return fileSearch(FileSearch!); } - else if (IsFunction && function != null) - { - return function(Function!); - } return default(TResult); } @@ -189,8 +146,7 @@ public bool Validate() /// public void Match( global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, - global::System.Action? function = null, + global::System.Action? fileSearch = null, bool validate = true) { if (validate) @@ -206,10 +162,6 @@ public void Match( { fileSearch?.Invoke(FileSearch!); } - else if (IsFunction) - { - function?.Invoke(Function!); - } } /// @@ -222,9 +174,7 @@ public override int GetHashCode() CodeInterpreter, typeof(global::G.AssistantToolsCode), FileSearch, - typeof(global::G.AssistantToolsFileSearch), - Function, - typeof(global::G.AssistantToolsFunction), + typeof(global::G.AssistantToolsFileSearchTypeOnly), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -242,8 +192,7 @@ public bool Equals(ToolsItem6 other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem7.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem7.g.verified.cs index 5d080f6db5..4bb5b7c86e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem7.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem7.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.MessageObjectAttachmentToolDiscriminatorType? Type { get; } + public global::G.ModifyAssistantRequestToolDiscriminatorType? Type { get; } /// /// @@ -54,9 +54,9 @@ public ToolsItem7(global::G.AssistantToolsCode? value) /// /// #if NET6_0_OR_GREATER - public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; init; } + public global::G.AssistantToolsFileSearch? FileSearch { get; init; } #else - public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; } + public global::G.AssistantToolsFileSearch? FileSearch { get; } #endif /// @@ -70,40 +70,78 @@ public ToolsItem7(global::G.AssistantToolsCode? value) /// /// /// - public static implicit operator ToolsItem7(global::G.AssistantToolsFileSearchTypeOnly value) => new ToolsItem7(value); + public static implicit operator ToolsItem7(global::G.AssistantToolsFileSearch value) => new ToolsItem7(value); /// /// /// - public static implicit operator global::G.AssistantToolsFileSearchTypeOnly?(ToolsItem7 @this) => @this.FileSearch; + public static implicit operator global::G.AssistantToolsFileSearch?(ToolsItem7 @this) => @this.FileSearch; /// /// /// - public ToolsItem7(global::G.AssistantToolsFileSearchTypeOnly? value) + public ToolsItem7(global::G.AssistantToolsFileSearch? value) { FileSearch = value; } + /// + /// + /// +#if NET6_0_OR_GREATER + public global::G.AssistantToolsFunction? 
Function { get; init; } +#else + public global::G.AssistantToolsFunction? Function { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Function))] +#endif + public bool IsFunction => Function != null; + + /// + /// + /// + public static implicit operator ToolsItem7(global::G.AssistantToolsFunction value) => new ToolsItem7(value); + + /// + /// + /// + public static implicit operator global::G.AssistantToolsFunction?(ToolsItem7 @this) => @this.Function; + + /// + /// + /// + public ToolsItem7(global::G.AssistantToolsFunction? value) + { + Function = value; + } + /// /// /// public ToolsItem7( - global::G.MessageObjectAttachmentToolDiscriminatorType? type, + global::G.ModifyAssistantRequestToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, - global::G.AssistantToolsFileSearchTypeOnly? fileSearch + global::G.AssistantToolsFileSearch? fileSearch, + global::G.AssistantToolsFunction? function ) { Type = type; CodeInterpreter = codeInterpreter; FileSearch = fileSearch; + Function = function; } /// /// /// public object? Object => + Function as object ?? FileSearch as object ?? CodeInterpreter as object ; @@ -113,7 +151,7 @@ CodeInterpreter as object /// public bool Validate() { - return IsCodeInterpreter && !IsFileSearch || !IsCodeInterpreter && IsFileSearch; + return IsCodeInterpreter && !IsFileSearch && !IsFunction || !IsCodeInterpreter && IsFileSearch && !IsFunction || !IsCodeInterpreter && !IsFileSearch && IsFunction; } /// @@ -121,7 +159,8 @@ public bool Validate() /// public TResult? Match( global::System.Func? codeInterpreter = null, - global::System.Func? fileSearch = null, + global::System.Func? fileSearch = null, + global::System.Func? function = null, bool validate = true) { if (validate) @@ -137,6 +176,10 @@ public bool Validate() { return fileSearch(FileSearch!); } + else if (IsFunction && function != null) + { + return function(Function!); + } return default(TResult); } @@ -146,7 +189,8 @@ public bool Validate() /// public void Match( global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, + global::System.Action? fileSearch = null, + global::System.Action? 
function = null, bool validate = true) { if (validate) @@ -162,6 +206,10 @@ public void Match( { fileSearch?.Invoke(FileSearch!); } + else if (IsFunction) + { + function?.Invoke(Function!); + } } /// @@ -174,7 +222,9 @@ public override int GetHashCode() CodeInterpreter, typeof(global::G.AssistantToolsCode), FileSearch, - typeof(global::G.AssistantToolsFileSearchTypeOnly), + typeof(global::G.AssistantToolsFileSearch), + Function, + typeof(global::G.AssistantToolsFunction), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -192,7 +242,8 @@ public bool Equals(ToolsItem7 other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem8.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem8.g.verified.cs index be3be8b483..d5a50b6d44 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem8.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ToolsItem8.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.CreateMessageRequestAttachmentToolDiscriminatorType? Type { get; } + public global::G.RunObjectToolDiscriminatorType? Type { get; } /// /// @@ -54,9 +54,9 @@ public ToolsItem8(global::G.AssistantToolsCode? value) /// /// #if NET6_0_OR_GREATER - public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; init; } + public global::G.AssistantToolsFileSearch? FileSearch { get; init; } #else - public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; } + public global::G.AssistantToolsFileSearch? FileSearch { get; } #endif /// @@ -70,40 +70,78 @@ public ToolsItem8(global::G.AssistantToolsCode? value) /// /// /// - public static implicit operator ToolsItem8(global::G.AssistantToolsFileSearchTypeOnly value) => new ToolsItem8(value); + public static implicit operator ToolsItem8(global::G.AssistantToolsFileSearch value) => new ToolsItem8(value); /// /// /// - public static implicit operator global::G.AssistantToolsFileSearchTypeOnly?(ToolsItem8 @this) => @this.FileSearch; + public static implicit operator global::G.AssistantToolsFileSearch?(ToolsItem8 @this) => @this.FileSearch; /// /// /// - public ToolsItem8(global::G.AssistantToolsFileSearchTypeOnly? value) + public ToolsItem8(global::G.AssistantToolsFileSearch? value) { FileSearch = value; } + /// + /// + /// +#if NET6_0_OR_GREATER + public global::G.AssistantToolsFunction? Function { get; init; } +#else + public global::G.AssistantToolsFunction? Function { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Function))] +#endif + public bool IsFunction => Function != null; + + /// + /// + /// + public static implicit operator ToolsItem8(global::G.AssistantToolsFunction value) => new ToolsItem8(value); + + /// + /// + /// + public static implicit operator global::G.AssistantToolsFunction?(ToolsItem8 @this) => @this.Function; + + /// + /// + /// + public ToolsItem8(global::G.AssistantToolsFunction? 
value) + { + Function = value; + } + /// /// /// public ToolsItem8( - global::G.CreateMessageRequestAttachmentToolDiscriminatorType? type, + global::G.RunObjectToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, - global::G.AssistantToolsFileSearchTypeOnly? fileSearch + global::G.AssistantToolsFileSearch? fileSearch, + global::G.AssistantToolsFunction? function ) { Type = type; CodeInterpreter = codeInterpreter; FileSearch = fileSearch; + Function = function; } /// /// /// public object? Object => + Function as object ?? FileSearch as object ?? CodeInterpreter as object ; @@ -113,7 +151,7 @@ CodeInterpreter as object /// public bool Validate() { - return IsCodeInterpreter && !IsFileSearch || !IsCodeInterpreter && IsFileSearch; + return IsCodeInterpreter && !IsFileSearch && !IsFunction || !IsCodeInterpreter && IsFileSearch && !IsFunction || !IsCodeInterpreter && !IsFileSearch && IsFunction; } /// @@ -121,7 +159,8 @@ public bool Validate() /// public TResult? Match( global::System.Func? codeInterpreter = null, - global::System.Func? fileSearch = null, + global::System.Func? fileSearch = null, + global::System.Func? function = null, bool validate = true) { if (validate) @@ -137,6 +176,10 @@ public bool Validate() { return fileSearch(FileSearch!); } + else if (IsFunction && function != null) + { + return function(Function!); + } return default(TResult); } @@ -146,7 +189,8 @@ public bool Validate() /// public void Match( global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, + global::System.Action? fileSearch = null, + global::System.Action? function = null, bool validate = true) { if (validate) @@ -162,6 +206,10 @@ public void Match( { fileSearch?.Invoke(FileSearch!); } + else if (IsFunction) + { + function?.Invoke(Function!); + } } /// @@ -174,7 +222,9 @@ public override int GetHashCode() CodeInterpreter, typeof(global::G.AssistantToolsCode), FileSearch, - typeof(global::G.AssistantToolsFileSearchTypeOnly), + typeof(global::G.AssistantToolsFileSearch), + Function, + typeof(global::G.AssistantToolsFunction), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -192,7 +242,8 @@ public bool Equals(ToolsItem8 other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UpdateVectorStoreRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UpdateVectorStoreRequest.g.verified.cs index 5bed954034..ff00e9b613 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UpdateVectorStoreRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UpdateVectorStoreRequest.g.verified.cs @@ -22,7 +22,7 @@ public sealed partial class UpdateVectorStoreRequest public global::G.VectorStoreExpirationAfter? ExpiresAfter { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata")] public object? Metadata { get; set; } @@ -43,7 +43,7 @@ public sealed partial class UpdateVectorStoreRequest /// The expiration policy for a vector store. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public UpdateVectorStoreRequest( string? name, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UpdateVectorStoreRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UpdateVectorStoreRequestMetadata.g.verified.cs index 0ae195a97e..086a141cc7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UpdateVectorStoreRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UpdateVectorStoreRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class UpdateVectorStoreRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesBucketWidth.g.verified.cs new file mode 100644 index 0000000000..24d6eb316d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesBucketWidth.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageAudioSpeechesBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageAudioSpeechesBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1m")] + x1m, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1h")] + x1h, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioSpeechesBucketWidthExtensions + { + /// + /// Converts an enum to a string. 
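(Editorial aside, not part of the snapshot diff: the generated bucket-width enums above pair each member with a `ToValueString`/`ToEnum` extension so wire values like `"1d"` map to enum members without reflection, and unknown strings come back as `null`. A minimal usage sketch, assuming the generated `G` namespace from this snapshot is referenced:)

```csharp
// Illustrative only: exercises the generated bucket-width enum mapping shown above.
using System;
using G;

class BucketWidthDemo
{
    static void Main()
    {
        // Enum -> wire value, no reflection involved.
        Console.WriteLine(UsageAudioSpeechesBucketWidth.x1d.ToValueString()); // "1d"

        // Wire value -> enum; unknown values yield null rather than throwing.
        UsageAudioSpeechesBucketWidth? parsed = UsageAudioSpeechesBucketWidthExtensions.ToEnum("1h");
        Console.WriteLine(parsed);                                                        // x1h
        Console.WriteLine(UsageAudioSpeechesBucketWidthExtensions.ToEnum("1w") is null);  // True ("1w" is not defined)
    }
}
```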
+ /// + public static string ToValueString(this UsageAudioSpeechesBucketWidth value) + { + return value switch + { + UsageAudioSpeechesBucketWidth.x1m => "1m", + UsageAudioSpeechesBucketWidth.x1h => "1h", + UsageAudioSpeechesBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioSpeechesBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageAudioSpeechesBucketWidth.x1m, + "1h" => UsageAudioSpeechesBucketWidth.x1h, + "1d" => UsageAudioSpeechesBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesGroupByItem.g.verified.cs new file mode 100644 index 0000000000..f1e4bd618e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesGroupByItem.g.verified.cs @@ -0,0 +1,69 @@ +//HintName: G.Models.UsageAudioSpeechesGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageAudioSpeechesGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="user_id")] + UserId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="api_key_id")] + ApiKeyId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="model")] + Model, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioSpeechesGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioSpeechesGroupByItem value) + { + return value switch + { + UsageAudioSpeechesGroupByItem.ProjectId => "project_id", + UsageAudioSpeechesGroupByItem.UserId => "user_id", + UsageAudioSpeechesGroupByItem.ApiKeyId => "api_key_id", + UsageAudioSpeechesGroupByItem.Model => "model", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioSpeechesGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageAudioSpeechesGroupByItem.ProjectId, + "user_id" => UsageAudioSpeechesGroupByItem.UserId, + "api_key_id" => UsageAudioSpeechesGroupByItem.ApiKeyId, + "model" => UsageAudioSpeechesGroupByItem.Model, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResult.Json.g.verified.cs new file mode 100644 index 0000000000..31a850006c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageAudioSpeechesResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageAudioSpeechesResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
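(Editorial aside: the `*.Json.g` companion files added here wrap Newtonsoft.Json round-tripping behind `ToJson`, `FromJson`, and `FromJsonStreamAsync`. A minimal sketch of the intended usage with invented field values, assuming the generated `G` namespace is referenced:)

```csharp
// Sketch only: round-trips a generated usage model through the Newtonsoft-based helpers above.
using System;
using G;

class UsageJsonDemo
{
    static void Main()
    {
        var result = new UsageAudioSpeechesResult
        {
            Object = UsageAudioSpeechesResultObject.OrganizationUsageAudioSpeechesResult,
            Characters = 45,          // invented sample values
            NumModelRequests = 3,
            ProjectId = "proj_123",   // hypothetical ID, for illustration only
        };

        string json = result.ToJson();
        Console.WriteLine(json);

        // FromJson returns null when the payload cannot be deserialized.
        UsageAudioSpeechesResult? roundTripped = UsageAudioSpeechesResult.FromJson(json);
        Console.WriteLine(roundTripped?.Characters); // 45
    }
}
```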
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageAudioSpeechesResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResult.g.verified.cs new file mode 100644 index 0000000000..3958be9d3d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResult.g.verified.cs @@ -0,0 +1,107 @@ +//HintName: G.Models.UsageAudioSpeechesResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated audio speeches usage details of the specific time bucket. + /// + public sealed partial class UsageAudioSpeechesResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageAudioSpeechesResultObject Object { get; set; } + + /// + /// The number of characters processed. + /// + [global::Newtonsoft.Json.JsonProperty("characters", Required = global::Newtonsoft.Json.Required.Always)] + public int Characters { get; set; } = default!; + + /// + /// The count of requests made to the model. + /// + [global::Newtonsoft.Json.JsonProperty("num_model_requests", Required = global::Newtonsoft.Json.Required.Always)] + public int NumModelRequests { get; set; } = default!; + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of characters processed. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + public UsageAudioSpeechesResult( + int characters, + int numModelRequests, + global::G.UsageAudioSpeechesResultObject @object, + string? projectId, + string? userId, + string? apiKeyId, + string? 
model) + { + this.Characters = characters; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageAudioSpeechesResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResultObject.g.verified.cs new file mode 100644 index 0000000000..4709f7599a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioSpeechesResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageAudioSpeechesResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageAudioSpeechesResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.audio_speeches.result")] + OrganizationUsageAudioSpeechesResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioSpeechesResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioSpeechesResultObject value) + { + return value switch + { + UsageAudioSpeechesResultObject.OrganizationUsageAudioSpeechesResult => "organization.usage.audio_speeches.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioSpeechesResultObject? ToEnum(string value) + { + return value switch + { + "organization.usage.audio_speeches.result" => UsageAudioSpeechesResultObject.OrganizationUsageAudioSpeechesResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..d0ba16213d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsBucketWidth.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageAudioTranscriptionsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageAudioTranscriptionsBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1m")] + x1m, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1h")] + x1h, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioTranscriptionsBucketWidthExtensions + { + /// + /// Converts an enum to a string. 
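(Editorial aside: these usage-result models also carry a `[JsonExtensionData]` `AdditionalProperties` bag, so JSON fields not declared in the schema are preserved on deserialization instead of being dropped. A small illustration; the `experimental_field` key is invented:)

```csharp
// Sketch only: shows the AdditionalProperties extension-data bag capturing an undeclared field.
using System;
using G;

class ExtensionDataDemo
{
    static void Main()
    {
        const string json = @"{
          ""characters"": 120,
          ""num_model_requests"": 7,
          ""experimental_field"": ""kept, not dropped""
        }";

        var result = UsageAudioSpeechesResult.FromJson(json);
        Console.WriteLine(result?.Characters);                                   // 120
        Console.WriteLine(result?.AdditionalProperties["experimental_field"]);   // kept, not dropped
    }
}
```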
+ /// + public static string ToValueString(this UsageAudioTranscriptionsBucketWidth value) + { + return value switch + { + UsageAudioTranscriptionsBucketWidth.x1m => "1m", + UsageAudioTranscriptionsBucketWidth.x1h => "1h", + UsageAudioTranscriptionsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioTranscriptionsBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageAudioTranscriptionsBucketWidth.x1m, + "1h" => UsageAudioTranscriptionsBucketWidth.x1h, + "1d" => UsageAudioTranscriptionsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..572b5e0fdc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsGroupByItem.g.verified.cs @@ -0,0 +1,69 @@ +//HintName: G.Models.UsageAudioTranscriptionsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageAudioTranscriptionsGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="user_id")] + UserId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="api_key_id")] + ApiKeyId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="model")] + Model, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioTranscriptionsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioTranscriptionsGroupByItem value) + { + return value switch + { + UsageAudioTranscriptionsGroupByItem.ProjectId => "project_id", + UsageAudioTranscriptionsGroupByItem.UserId => "user_id", + UsageAudioTranscriptionsGroupByItem.ApiKeyId => "api_key_id", + UsageAudioTranscriptionsGroupByItem.Model => "model", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioTranscriptionsGroupByItem? 
ToEnum(string value) + { + return value switch + { + "project_id" => UsageAudioTranscriptionsGroupByItem.ProjectId, + "user_id" => UsageAudioTranscriptionsGroupByItem.UserId, + "api_key_id" => UsageAudioTranscriptionsGroupByItem.ApiKeyId, + "model" => UsageAudioTranscriptionsGroupByItem.Model, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResult.Json.g.verified.cs new file mode 100644 index 0000000000..c2245939b7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageAudioTranscriptionsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageAudioTranscriptionsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageAudioTranscriptionsResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResult.g.verified.cs new file mode 100644 index 0000000000..bad710fd29 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResult.g.verified.cs @@ -0,0 +1,107 @@ +//HintName: G.Models.UsageAudioTranscriptionsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated audio transcriptions usage details of the specific time bucket. + /// + public sealed partial class UsageAudioTranscriptionsResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageAudioTranscriptionsResultObject Object { get; set; } + + /// + /// The number of seconds processed. + /// + [global::Newtonsoft.Json.JsonProperty("seconds", Required = global::Newtonsoft.Json.Required.Always)] + public int Seconds { get; set; } = default!; + + /// + /// The count of requests made to the model. + /// + [global::Newtonsoft.Json.JsonProperty("num_model_requests", Required = global::Newtonsoft.Json.Required.Always)] + public int NumModelRequests { get; set; } = default!; + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of seconds processed. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. 
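(Editorial aside: the generated full constructor takes the required counters first, then the `object` discriminator, then the nullable `group_by` fields, which are only populated when the corresponding grouping was requested. A sketch with invented values:)

```csharp
// Sketch only: constructing the generated UsageAudioTranscriptionsResult via its full constructor.
using System;
using G;

class TranscriptionsUsageDemo
{
    static void Main()
    {
        var bucketResult = new UsageAudioTranscriptionsResult(
            seconds: 90,              // invented sample values
            numModelRequests: 4,
            @object: UsageAudioTranscriptionsResultObject.OrganizationUsageAudioTranscriptionsResult,
            projectId: "proj_abc",    // set only when group_by=project_id was requested
            userId: null,
            apiKeyId: null,
            model: "whisper-1");

        Console.WriteLine($"{bucketResult.Seconds}s across {bucketResult.NumModelRequests} requests");
    }
}
```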
+ /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + public UsageAudioTranscriptionsResult( + int seconds, + int numModelRequests, + global::G.UsageAudioTranscriptionsResultObject @object, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.Seconds = seconds; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageAudioTranscriptionsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResultObject.g.verified.cs new file mode 100644 index 0000000000..031905ff51 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageAudioTranscriptionsResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageAudioTranscriptionsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageAudioTranscriptionsResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.audio_transcriptions.result")] + OrganizationUsageAudioTranscriptionsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioTranscriptionsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioTranscriptionsResultObject value) + { + return value switch + { + UsageAudioTranscriptionsResultObject.OrganizationUsageAudioTranscriptionsResult => "organization.usage.audio_transcriptions.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioTranscriptionsResultObject? ToEnum(string value) + { + return value switch + { + "organization.usage.audio_transcriptions.result" => UsageAudioTranscriptionsResultObject.OrganizationUsageAudioTranscriptionsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..bcb3e62c9a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageCodeInterpreterSessionsBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1m")] + x1m, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1h")] + x1h, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
+ /// + public static class UsageCodeInterpreterSessionsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCodeInterpreterSessionsBucketWidth value) + { + return value switch + { + UsageCodeInterpreterSessionsBucketWidth.x1m => "1m", + UsageCodeInterpreterSessionsBucketWidth.x1h => "1h", + UsageCodeInterpreterSessionsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCodeInterpreterSessionsBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageCodeInterpreterSessionsBucketWidth.x1m, + "1h" => UsageCodeInterpreterSessionsBucketWidth.x1h, + "1d" => UsageCodeInterpreterSessionsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..c093931471 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageCodeInterpreterSessionsGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCodeInterpreterSessionsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCodeInterpreterSessionsGroupByItem value) + { + return value switch + { + UsageCodeInterpreterSessionsGroupByItem.ProjectId => "project_id", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCodeInterpreterSessionsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageCodeInterpreterSessionsGroupByItem.ProjectId, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResult.Json.g.verified.cs new file mode 100644 index 0000000000..a8d59ab391 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageCodeInterpreterSessionsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageCodeInterpreterSessionsResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResult.g.verified.cs new file mode 100644 index 0000000000..2242138f1d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResult.g.verified.cs @@ -0,0 +1,63 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated code interpreter sessions usage details of the specific time bucket. 
+ /// + public sealed partial class UsageCodeInterpreterSessionsResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageCodeInterpreterSessionsResultObject Object { get; set; } + + /// + /// The number of code interpreter sessions. + /// + [global::Newtonsoft.Json.JsonProperty("sessions", Required = global::Newtonsoft.Json.Required.Always)] + public int Sessions { get; set; } = default!; + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of code interpreter sessions. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + public UsageCodeInterpreterSessionsResult( + int sessions, + global::G.UsageCodeInterpreterSessionsResultObject @object, + string? projectId) + { + this.Sessions = sessions; + this.Object = @object; + this.ProjectId = projectId; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageCodeInterpreterSessionsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResultObject.g.verified.cs new file mode 100644 index 0000000000..946d04ae02 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCodeInterpreterSessionsResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageCodeInterpreterSessionsResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.code_interpreter_sessions.result")] + OrganizationUsageCodeInterpreterSessionsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCodeInterpreterSessionsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCodeInterpreterSessionsResultObject value) + { + return value switch + { + UsageCodeInterpreterSessionsResultObject.OrganizationUsageCodeInterpreterSessionsResult => "organization.usage.code_interpreter_sessions.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCodeInterpreterSessionsResultObject? 
ToEnum(string value) + { + return value switch + { + "organization.usage.code_interpreter_sessions.result" => UsageCodeInterpreterSessionsResultObject.OrganizationUsageCodeInterpreterSessionsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..8aad3cddc0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsBucketWidth.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageCompletionsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageCompletionsBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1m")] + x1m, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1h")] + x1h, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCompletionsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCompletionsBucketWidth value) + { + return value switch + { + UsageCompletionsBucketWidth.x1m => "1m", + UsageCompletionsBucketWidth.x1h => "1h", + UsageCompletionsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCompletionsBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageCompletionsBucketWidth.x1m, + "1h" => UsageCompletionsBucketWidth.x1h, + "1d" => UsageCompletionsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..4e8ef36ce6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsGroupByItem.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.UsageCompletionsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageCompletionsGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="user_id")] + UserId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="api_key_id")] + ApiKeyId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="model")] + Model, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="batch")] + Batch, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCompletionsGroupByItemExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this UsageCompletionsGroupByItem value) + { + return value switch + { + UsageCompletionsGroupByItem.ProjectId => "project_id", + UsageCompletionsGroupByItem.UserId => "user_id", + UsageCompletionsGroupByItem.ApiKeyId => "api_key_id", + UsageCompletionsGroupByItem.Model => "model", + UsageCompletionsGroupByItem.Batch => "batch", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCompletionsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageCompletionsGroupByItem.ProjectId, + "user_id" => UsageCompletionsGroupByItem.UserId, + "api_key_id" => UsageCompletionsGroupByItem.ApiKeyId, + "model" => UsageCompletionsGroupByItem.Model, + "batch" => UsageCompletionsGroupByItem.Batch, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResult.Json.g.verified.cs new file mode 100644 index 0000000000..cd6e4d145a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageCompletionsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageCompletionsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageCompletionsResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResult.g.verified.cs new file mode 100644 index 0000000000..01e9f3ac40 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResult.g.verified.cs @@ -0,0 +1,140 @@ +//HintName: G.Models.UsageCompletionsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated completions usage details of the specific time bucket. + /// + public sealed partial class UsageCompletionsResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageCompletionsResultObject Object { get; set; } + + /// + /// The number of input tokens used. + /// + [global::Newtonsoft.Json.JsonProperty("input_tokens", Required = global::Newtonsoft.Json.Required.Always)] + public int InputTokens { get; set; } = default!; + + /// + /// The number of input tokens that has been cached from previous requests. + /// + [global::Newtonsoft.Json.JsonProperty("input_cached_tokens")] + public int? InputCachedTokens { get; set; } + + /// + /// The number of output tokens used. + /// + [global::Newtonsoft.Json.JsonProperty("output_tokens", Required = global::Newtonsoft.Json.Required.Always)] + public int OutputTokens { get; set; } = default!; + + /// + /// The count of requests made to the model. + /// + [global::Newtonsoft.Json.JsonProperty("num_model_requests", Required = global::Newtonsoft.Json.Required.Always)] + public int NumModelRequests { get; set; } = default!; + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. 
+ /// + [global::Newtonsoft.Json.JsonProperty("model")] + public string? Model { get; set; } + + /// + /// When `group_by=batch`, this field tells whether the grouped usage result is batch or not. + /// + [global::Newtonsoft.Json.JsonProperty("batch")] + public bool? Batch { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of input tokens used. + /// + /// + /// The number of input tokens that has been cached from previous requests. + /// + /// + /// The number of output tokens used. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + /// + /// When `group_by=batch`, this field tells whether the grouped usage result is batch or not. + /// + public UsageCompletionsResult( + int inputTokens, + int outputTokens, + int numModelRequests, + global::G.UsageCompletionsResultObject @object, + int? inputCachedTokens, + string? projectId, + string? userId, + string? apiKeyId, + string? model, + bool? batch) + { + this.InputTokens = inputTokens; + this.OutputTokens = outputTokens; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.InputCachedTokens = inputCachedTokens; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + this.Batch = batch; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageCompletionsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResultObject.g.verified.cs new file mode 100644 index 0000000000..3b21253218 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCompletionsResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageCompletionsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageCompletionsResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.completions.result")] + OrganizationUsageCompletionsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCompletionsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCompletionsResultObject value) + { + return value switch + { + UsageCompletionsResultObject.OrganizationUsageCompletionsResult => "organization.usage.completions.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. 
+ /// + public static UsageCompletionsResultObject? ToEnum(string value) + { + return value switch + { + "organization.usage.completions.result" => UsageCompletionsResultObject.OrganizationUsageCompletionsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCostsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCostsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..5e27c48170 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCostsBucketWidth.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageCostsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageCostsBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCostsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCostsBucketWidth value) + { + return value switch + { + UsageCostsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCostsBucketWidth? ToEnum(string value) + { + return value switch + { + "1d" => UsageCostsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCostsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCostsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..0aec22692c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageCostsGroupByItem.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.UsageCostsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageCostsGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="line_item")] + LineItem, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCostsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCostsGroupByItem value) + { + return value switch + { + UsageCostsGroupByItem.ProjectId => "project_id", + UsageCostsGroupByItem.LineItem => "line_item", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCostsGroupByItem? 
ToEnum(string value) + { + return value switch + { + "project_id" => UsageCostsGroupByItem.ProjectId, + "line_item" => UsageCostsGroupByItem.LineItem, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..0ef09b7631 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsBucketWidth.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageEmbeddingsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageEmbeddingsBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1m")] + x1m, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1h")] + x1h, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageEmbeddingsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageEmbeddingsBucketWidth value) + { + return value switch + { + UsageEmbeddingsBucketWidth.x1m => "1m", + UsageEmbeddingsBucketWidth.x1h => "1h", + UsageEmbeddingsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageEmbeddingsBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageEmbeddingsBucketWidth.x1m, + "1h" => UsageEmbeddingsBucketWidth.x1h, + "1d" => UsageEmbeddingsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..20211715d6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsGroupByItem.g.verified.cs @@ -0,0 +1,69 @@ +//HintName: G.Models.UsageEmbeddingsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageEmbeddingsGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="user_id")] + UserId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="api_key_id")] + ApiKeyId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="model")] + Model, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageEmbeddingsGroupByItemExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this UsageEmbeddingsGroupByItem value) + { + return value switch + { + UsageEmbeddingsGroupByItem.ProjectId => "project_id", + UsageEmbeddingsGroupByItem.UserId => "user_id", + UsageEmbeddingsGroupByItem.ApiKeyId => "api_key_id", + UsageEmbeddingsGroupByItem.Model => "model", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageEmbeddingsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageEmbeddingsGroupByItem.ProjectId, + "user_id" => UsageEmbeddingsGroupByItem.UserId, + "api_key_id" => UsageEmbeddingsGroupByItem.ApiKeyId, + "model" => UsageEmbeddingsGroupByItem.Model, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResult.Json.g.verified.cs new file mode 100644 index 0000000000..a1c195086a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageEmbeddingsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageEmbeddingsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageEmbeddingsResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResult.g.verified.cs new file mode 100644 index 0000000000..752daba91b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResult.g.verified.cs @@ -0,0 +1,107 @@ +//HintName: G.Models.UsageEmbeddingsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated embeddings usage details of the specific time bucket. + /// + public sealed partial class UsageEmbeddingsResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageEmbeddingsResultObject Object { get; set; } + + /// + /// The number of input tokens used. + /// + [global::Newtonsoft.Json.JsonProperty("input_tokens", Required = global::Newtonsoft.Json.Required.Always)] + public int InputTokens { get; set; } = default!; + + /// + /// The count of requests made to the model. + /// + [global::Newtonsoft.Json.JsonProperty("num_model_requests", Required = global::Newtonsoft.Json.Required.Always)] + public int NumModelRequests { get; set; } = default!; + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of input tokens used. + /// + /// + /// The count of requests made to the model. 
+ /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + public UsageEmbeddingsResult( + int inputTokens, + int numModelRequests, + global::G.UsageEmbeddingsResultObject @object, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.InputTokens = inputTokens; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageEmbeddingsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResultObject.g.verified.cs new file mode 100644 index 0000000000..6220a399a4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageEmbeddingsResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageEmbeddingsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageEmbeddingsResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.embeddings.result")] + OrganizationUsageEmbeddingsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageEmbeddingsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageEmbeddingsResultObject value) + { + return value switch + { + UsageEmbeddingsResultObject.OrganizationUsageEmbeddingsResult => "organization.usage.embeddings.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageEmbeddingsResultObject? 
ToEnum(string value) + { + return value switch + { + "organization.usage.embeddings.result" => UsageEmbeddingsResultObject.OrganizationUsageEmbeddingsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesBucketWidth.g.verified.cs new file mode 100644 index 0000000000..47f74c7fc0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesBucketWidth.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageImagesBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageImagesBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1m")] + x1m, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1h")] + x1h, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageImagesBucketWidth value) + { + return value switch + { + UsageImagesBucketWidth.x1m => "1m", + UsageImagesBucketWidth.x1h => "1h", + UsageImagesBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageImagesBucketWidth.x1m, + "1h" => UsageImagesBucketWidth.x1h, + "1d" => UsageImagesBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesGroupByItem.g.verified.cs new file mode 100644 index 0000000000..cbfa3c7e11 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesGroupByItem.g.verified.cs @@ -0,0 +1,83 @@ +//HintName: G.Models.UsageImagesGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageImagesGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="user_id")] + UserId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="api_key_id")] + ApiKeyId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="model")] + Model, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="size")] + Size, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="source")] + Source, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesGroupByItemExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this UsageImagesGroupByItem value) + { + return value switch + { + UsageImagesGroupByItem.ProjectId => "project_id", + UsageImagesGroupByItem.UserId => "user_id", + UsageImagesGroupByItem.ApiKeyId => "api_key_id", + UsageImagesGroupByItem.Model => "model", + UsageImagesGroupByItem.Size => "size", + UsageImagesGroupByItem.Source => "source", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageImagesGroupByItem.ProjectId, + "user_id" => UsageImagesGroupByItem.UserId, + "api_key_id" => UsageImagesGroupByItem.ApiKeyId, + "model" => UsageImagesGroupByItem.Model, + "size" => UsageImagesGroupByItem.Size, + "source" => UsageImagesGroupByItem.Source, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResult.Json.g.verified.cs new file mode 100644 index 0000000000..a0b4ac661a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageImagesResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageImagesResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageImagesResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResult.g.verified.cs new file mode 100644 index 0000000000..1b054e87c1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResult.g.verified.cs @@ -0,0 +1,129 @@ +//HintName: G.Models.UsageImagesResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated images usage details of the specific time bucket. + /// + public sealed partial class UsageImagesResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageImagesResultObject Object { get; set; } + + /// + /// The number of images processed. + /// + [global::Newtonsoft.Json.JsonProperty("images", Required = global::Newtonsoft.Json.Required.Always)] + public int Images { get; set; } = default!; + + /// + /// The count of requests made to the model. + /// + [global::Newtonsoft.Json.JsonProperty("num_model_requests", Required = global::Newtonsoft.Json.Required.Always)] + public int NumModelRequests { get; set; } = default!; + + /// + /// When `group_by=source`, this field provides the source of the grouped usage result, possible values are `image.generation`, `image.edit`, `image.variation`. + /// + [global::Newtonsoft.Json.JsonProperty("source")] + public string? Source { get; set; } + + /// + /// When `group_by=size`, this field provides the image size of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("size")] + public string? Size { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. 
+ /// + [global::Newtonsoft.Json.JsonProperty("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of images processed. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=source`, this field provides the source of the grouped usage result, possible values are `image.generation`, `image.edit`, `image.variation`. + /// + /// + /// When `group_by=size`, this field provides the image size of the grouped usage result. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + public UsageImagesResult( + int images, + int numModelRequests, + global::G.UsageImagesResultObject @object, + string? source, + string? size, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.Images = images; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.Source = source; + this.Size = size; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageImagesResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResultObject.g.verified.cs new file mode 100644 index 0000000000..bb398af344 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageImagesResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageImagesResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.images.result")] + OrganizationUsageImagesResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageImagesResultObject value) + { + return value switch + { + UsageImagesResultObject.OrganizationUsageImagesResult => "organization.usage.images.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesResultObject? 
ToEnum(string value) + { + return value switch + { + "organization.usage.images.result" => UsageImagesResultObject.OrganizationUsageImagesResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesSize.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesSize.g.verified.cs new file mode 100644 index 0000000000..2494b75f9e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesSize.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.UsageImagesSize.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageImagesSize + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="256x256")] + x256x256, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="512x512")] + x512x512, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1024x1024")] + x1024x1024, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1792x1792")] + x1792x1792, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1024x1792")] + x1024x1792, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesSizeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageImagesSize value) + { + return value switch + { + UsageImagesSize.x256x256 => "256x256", + UsageImagesSize.x512x512 => "512x512", + UsageImagesSize.x1024x1024 => "1024x1024", + UsageImagesSize.x1792x1792 => "1792x1792", + UsageImagesSize.x1024x1792 => "1024x1792", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesSize? ToEnum(string value) + { + return value switch + { + "256x256" => UsageImagesSize.x256x256, + "512x512" => UsageImagesSize.x512x512, + "1024x1024" => UsageImagesSize.x1024x1024, + "1792x1792" => UsageImagesSize.x1792x1792, + "1024x1792" => UsageImagesSize.x1024x1792, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesSource.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesSource.g.verified.cs new file mode 100644 index 0000000000..65be5c2da3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageImagesSource.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageImagesSource.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageImagesSource + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image.generation")] + ImageGeneration, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image.edit")] + ImageEdit, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="image.variation")] + ImageVariation, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesSourceExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this UsageImagesSource value) + { + return value switch + { + UsageImagesSource.ImageGeneration => "image.generation", + UsageImagesSource.ImageEdit => "image.edit", + UsageImagesSource.ImageVariation => "image.variation", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesSource? ToEnum(string value) + { + return value switch + { + "image.generation" => UsageImagesSource.ImageGeneration, + "image.edit" => UsageImagesSource.ImageEdit, + "image.variation" => UsageImagesSource.ImageVariation, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..340b0f5968 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsBucketWidth.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageModerationsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageModerationsBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1m")] + x1m, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1h")] + x1h, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageModerationsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageModerationsBucketWidth value) + { + return value switch + { + UsageModerationsBucketWidth.x1m => "1m", + UsageModerationsBucketWidth.x1h => "1h", + UsageModerationsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageModerationsBucketWidth? 
ToEnum(string value) + { + return value switch + { + "1m" => UsageModerationsBucketWidth.x1m, + "1h" => UsageModerationsBucketWidth.x1h, + "1d" => UsageModerationsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..46545f78d3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsGroupByItem.g.verified.cs @@ -0,0 +1,69 @@ +//HintName: G.Models.UsageModerationsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageModerationsGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="user_id")] + UserId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="api_key_id")] + ApiKeyId, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="model")] + Model, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageModerationsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageModerationsGroupByItem value) + { + return value switch + { + UsageModerationsGroupByItem.ProjectId => "project_id", + UsageModerationsGroupByItem.UserId => "user_id", + UsageModerationsGroupByItem.ApiKeyId => "api_key_id", + UsageModerationsGroupByItem.Model => "model", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageModerationsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageModerationsGroupByItem.ProjectId, + "user_id" => UsageModerationsGroupByItem.UserId, + "api_key_id" => UsageModerationsGroupByItem.ApiKeyId, + "model" => UsageModerationsGroupByItem.Model, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResult.Json.g.verified.cs new file mode 100644 index 0000000000..1e0c7bfd4b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageModerationsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageModerationsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageModerationsResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResult.g.verified.cs new file mode 100644 index 0000000000..702c6d914a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResult.g.verified.cs @@ -0,0 +1,107 @@ +//HintName: G.Models.UsageModerationsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated moderations usage details of the specific time bucket. + /// + public sealed partial class UsageModerationsResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageModerationsResultObject Object { get; set; } + + /// + /// The number of input tokens used. + /// + [global::Newtonsoft.Json.JsonProperty("input_tokens", Required = global::Newtonsoft.Json.Required.Always)] + public int InputTokens { get; set; } = default!; + + /// + /// The count of requests made to the model. 
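A minimal consumption sketch, not part of the generated snapshot: it feeds an illustrative payload through the Newtonsoft-backed FromJson/ToJson partial shown above. Only the two required fields are included, so nothing is assumed about how the "object" enum is converted.

using G;

internal static class UsageModerationsJsonExample
{
    internal static void Run()
    {
        // Illustrative payload; property names follow the JsonProperty
        // attributes on the generated model ("input_tokens", "num_model_requests").
        const string json = "{\"input_tokens\":1024,\"num_model_requests\":7}";

        // FromJson wraps Newtonsoft.Json and returns null when nothing could be materialized.
        UsageModerationsResult? result = UsageModerationsResult.FromJson(json);
        System.Console.WriteLine(result?.InputTokens);
        System.Console.WriteLine(result?.NumModelRequests);

        // ToJson round-trips the instance back to a JSON string.
        System.Console.WriteLine(result?.ToJson());
    }
}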
+ /// + [global::Newtonsoft.Json.JsonProperty("num_model_requests", Required = global::Newtonsoft.Json.Required.Always)] + public int NumModelRequests { get; set; } = default!; + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of input tokens used. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + public UsageModerationsResult( + int inputTokens, + int numModelRequests, + global::G.UsageModerationsResultObject @object, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.InputTokens = inputTokens; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageModerationsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResultObject.g.verified.cs new file mode 100644 index 0000000000..01333b6783 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageModerationsResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageModerationsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageModerationsResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.moderations.result")] + OrganizationUsageModerationsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageModerationsResultObjectExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this UsageModerationsResultObject value) + { + return value switch + { + UsageModerationsResultObject.OrganizationUsageModerationsResult => "organization.usage.moderations.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageModerationsResultObject? ToEnum(string value) + { + return value switch + { + "organization.usage.moderations.result" => UsageModerationsResultObject.OrganizationUsageModerationsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponse.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponse.Json.g.verified.cs new file mode 100644 index 0000000000..66310e1fd2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponse.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageResponse.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageResponse + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageResponse? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponse.g.verified.cs new file mode 100644 index 0000000000..7a4e62d0d7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponse.g.verified.cs @@ -0,0 +1,68 @@ +//HintName: G.Models.UsageResponse.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class UsageResponse + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageResponseObject Object { get; set; } + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("data", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList Data { get; set; } = default!; + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("has_more", Required = global::Newtonsoft.Json.Required.Always)] + public bool HasMore { get; set; } = default!; + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("next_page", Required = global::Newtonsoft.Json.Required.Always)] + public string NextPage { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// + /// + public UsageResponse( + global::System.Collections.Generic.IList data, + bool hasMore, + string nextPage, + global::G.UsageResponseObject @object) + { + this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.HasMore = hasMore; + this.NextPage = nextPage ?? throw new global::System.ArgumentNullException(nameof(nextPage)); + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. 
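A hedged construction sketch, useful for test doubles: the generic type arguments were lost in this rendering of the diff, so the element type of Data is assumed to be the UsageTimeBucket model that appears a few files further down.

using System.Collections.Generic;
using G;

internal static class UsagePageExample
{
    internal static UsageResponse BuildEmptyPage()
    {
        // All required constructor arguments are supplied explicitly; the
        // "object" discriminator is the single "page" value defined below.
        return new UsageResponse(
            data: new List<UsageTimeBucket>(),
            hasMore: false,
            nextPage: string.Empty,
            @object: UsageResponseObject.Page);
    }
}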
+ /// + public UsageResponse() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponseObject.g.verified.cs similarity index 66% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesResponseObject.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponseObject.g.verified.cs index 738fe962dd..d371eb947e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.ListFilesResponseObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageResponseObject.g.verified.cs @@ -1,4 +1,4 @@ -//HintName: G.Models.ListFilesResponseObject.g.cs +//HintName: G.Models.UsageResponseObject.g.cs #nullable enable @@ -8,39 +8,39 @@ namespace G /// /// [global::System.Runtime.Serialization.DataContract] - public enum ListFilesResponseObject + public enum UsageResponseObject { /// /// /// - [global::System.Runtime.Serialization.EnumMember(Value="list")] - List, + [global::System.Runtime.Serialization.EnumMember(Value="page")] + Page, } /// /// Enum extensions to do fast conversions without the reflection. /// - public static class ListFilesResponseObjectExtensions + public static class UsageResponseObjectExtensions { /// /// Converts an enum to a string. /// - public static string ToValueString(this ListFilesResponseObject value) + public static string ToValueString(this UsageResponseObject value) { return value switch { - ListFilesResponseObject.List => "list", + UsageResponseObject.Page => "page", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } /// /// Converts an string to a enum. /// - public static ListFilesResponseObject? ToEnum(string value) + public static UsageResponseObject? ToEnum(string value) { return value switch { - "list" => ListFilesResponseObject.List, + "page" => UsageResponseObject.Page, _ => null, }; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucket.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucket.Json.g.verified.cs new file mode 100644 index 0000000000..68dc6ae8f9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucket.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageTimeBucket.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageTimeBucket + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageTimeBucket? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? 
jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucket.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucket.g.verified.cs new file mode 100644 index 0000000000..46fc13213e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucket.g.verified.cs @@ -0,0 +1,68 @@ +//HintName: G.Models.UsageTimeBucket.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class UsageTimeBucket + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageTimeBucketObject Object { get; set; } + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("start_time", Required = global::Newtonsoft.Json.Required.Always)] + public int StartTime { get; set; } = default!; + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("end_time", Required = global::Newtonsoft.Json.Required.Always)] + public int EndTime { get; set; } = default!; + + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("result", Required = global::Newtonsoft.Json.Required.Always)] + public global::System.Collections.Generic.IList Result { get; set; } = default!; + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// + /// + public UsageTimeBucket( + int startTime, + int endTime, + global::System.Collections.Generic.IList result, + global::G.UsageTimeBucketObject @object) + { + this.StartTime = startTime; + this.EndTime = endTime; + this.Result = result ?? throw new global::System.ArgumentNullException(nameof(result)); + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageTimeBucket() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketObject.g.verified.cs new file mode 100644 index 0000000000..67c7dd7e4f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageTimeBucketObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageTimeBucketObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="bucket")] + Bucket, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageTimeBucketObjectExtensions + { + /// + /// Converts an enum to a string. 
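A short sketch of the reflection-free round-trip that these generated extension classes expose; UsageTimeBucketObject is used only because it is the enum defined in the surrounding file.

using G;

internal static class BucketObjectRoundTrip
{
    internal static void Run()
    {
        // Extension method: enum member -> wire string ("bucket").
        string wire = UsageTimeBucketObject.Bucket.ToValueString();

        // Static helper on the extensions class: wire string -> enum member,
        // or null when the value is not recognized.
        UsageTimeBucketObject? known = UsageTimeBucketObjectExtensions.ToEnum(wire);
        UsageTimeBucketObject? unknown = UsageTimeBucketObjectExtensions.ToEnum("not-a-bucket");

        System.Console.WriteLine($"{wire} -> {known}; unknown parses to null: {unknown is null}");
    }
}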
+ /// + public static string ToValueString(this UsageTimeBucketObject value) + { + return value switch + { + UsageTimeBucketObject.Bucket => "bucket", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageTimeBucketObject? ToEnum(string value) + { + return value switch + { + "bucket" => UsageTimeBucketObject.Bucket, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.Json.g.verified.cs new file mode 100644 index 0000000000..4e3e6054b9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageTimeBucketResultItemDiscriminator.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageTimeBucketResultItemDiscriminator + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageTimeBucketResultItemDiscriminator? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.g.verified.cs new file mode 100644 index 0000000000..6675ece1dd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.g.verified.cs @@ -0,0 +1,41 @@ +//HintName: G.Models.UsageTimeBucketResultItemDiscriminator.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class UsageTimeBucketResultItemDiscriminator + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageTimeBucketResultItemDiscriminatorObject? Object { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + public UsageTimeBucketResultItemDiscriminator( + global::G.UsageTimeBucketResultItemDiscriminatorObject? @object) + { + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public UsageTimeBucketResultItemDiscriminator() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs new file mode 100644 index 0000000000..43a50ccbb8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs @@ -0,0 +1,104 @@ +//HintName: G.Models.UsageTimeBucketResultItemDiscriminatorObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageTimeBucketResultItemDiscriminatorObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.completions.result")] + OrganizationUsageCompletionsResult, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.embeddings.result")] + OrganizationUsageEmbeddingsResult, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.moderations.result")] + OrganizationUsageModerationsResult, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.images.result")] + OrganizationUsageImagesResult, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.audio_speeches.result")] + OrganizationUsageAudioSpeechesResult, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.audio_transcriptions.result")] + OrganizationUsageAudioTranscriptionsResult, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.vector_stores.result")] + OrganizationUsageVectorStoresResult, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.code_interpreter_sessions.result")] + OrganizationUsageCodeInterpreterSessionsResult, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.costs.result")] + OrganizationCostsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageTimeBucketResultItemDiscriminatorObjectExtensions + { + /// + /// Converts an enum to a string. 
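A sketch of how the discriminator model above can drive dispatch over a time bucket's heterogeneous result items; it only reads the Object property and leaves deserialization of the concrete result models to the caller.

using G;

internal static class ResultItemDispatch
{
    // Maps the "object" discriminator of one result item to a human-readable
    // label; unknown or missing discriminators fall through to the default arm.
    internal static string Describe(UsageTimeBucketResultItemDiscriminator item)
    {
        return item.Object switch
        {
            UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageModerationsResult => "moderations usage",
            UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageVectorStoresResult => "vector stores usage",
            UsageTimeBucketResultItemDiscriminatorObject.OrganizationCostsResult => "costs",
            _ => "other or unrecognized result item",
        };
    }
}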
+ /// + public static string ToValueString(this UsageTimeBucketResultItemDiscriminatorObject value) + { + return value switch + { + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCompletionsResult => "organization.usage.completions.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageEmbeddingsResult => "organization.usage.embeddings.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageModerationsResult => "organization.usage.moderations.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageImagesResult => "organization.usage.images.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioSpeechesResult => "organization.usage.audio_speeches.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioTranscriptionsResult => "organization.usage.audio_transcriptions.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageVectorStoresResult => "organization.usage.vector_stores.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCodeInterpreterSessionsResult => "organization.usage.code_interpreter_sessions.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationCostsResult => "organization.costs.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageTimeBucketResultItemDiscriminatorObject? ToEnum(string value) + { + return value switch + { + "organization.usage.completions.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCompletionsResult, + "organization.usage.embeddings.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageEmbeddingsResult, + "organization.usage.moderations.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageModerationsResult, + "organization.usage.images.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageImagesResult, + "organization.usage.audio_speeches.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioSpeechesResult, + "organization.usage.audio_transcriptions.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioTranscriptionsResult, + "organization.usage.vector_stores.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageVectorStoresResult, + "organization.usage.code_interpreter_sessions.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCodeInterpreterSessionsResult, + "organization.costs.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationCostsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresBucketWidth.g.verified.cs new file mode 100644 index 0000000000..fd3d561ded --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresBucketWidth.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.UsageVectorStoresBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageVectorStoresBucketWidth + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1m")] + x1m, + /// + /// + /// + 
[global::System.Runtime.Serialization.EnumMember(Value="1h")] + x1h, + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="1d")] + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageVectorStoresBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageVectorStoresBucketWidth value) + { + return value switch + { + UsageVectorStoresBucketWidth.x1m => "1m", + UsageVectorStoresBucketWidth.x1h => "1h", + UsageVectorStoresBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageVectorStoresBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageVectorStoresBucketWidth.x1m, + "1h" => UsageVectorStoresBucketWidth.x1h, + "1d" => UsageVectorStoresBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresGroupByItem.g.verified.cs new file mode 100644 index 0000000000..219331402e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresGroupByItem.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageVectorStoresGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageVectorStoresGroupByItem + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="project_id")] + ProjectId, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageVectorStoresGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageVectorStoresGroupByItem value) + { + return value switch + { + UsageVectorStoresGroupByItem.ProjectId => "project_id", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageVectorStoresGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageVectorStoresGroupByItem.ProjectId, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResult.Json.g.verified.cs new file mode 100644 index 0000000000..f9056bc6ec --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResult.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.UsageVectorStoresResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageVectorStoresResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageVectorStoresResult? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask(serializer.Deserialize(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResult.g.verified.cs new file mode 100644 index 0000000000..1dad1638cf --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResult.g.verified.cs @@ -0,0 +1,63 @@ +//HintName: G.Models.UsageVectorStoresResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated vector stores usage details of the specific time bucket. 
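Another small consumption sketch, this time for the vector stores result defined directly below; the payload is illustrative and omits the "object" field, so only the required usage_bytes and the optional project_id grouping field are exercised.

using G;

internal static class VectorStoresUsageExample
{
    internal static void Run()
    {
        // Illustrative payload; "proj_example" is a placeholder id.
        const string json = "{\"usage_bytes\":1048576,\"project_id\":\"proj_example\"}";

        UsageVectorStoresResult? result = UsageVectorStoresResult.FromJson(json);
        if (result is not null)
        {
            // project_id is only populated when the query grouped by project_id.
            string scope = result.ProjectId ?? "all projects";
            System.Console.WriteLine($"{result.UsageBytes} bytes of vector store usage for {scope}");
        }
    }
}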
+ /// + public sealed partial class UsageVectorStoresResult + { + /// + /// + /// + [global::Newtonsoft.Json.JsonProperty("object")] + public global::G.UsageVectorStoresResultObject Object { get; set; } + + /// + /// The vector stores usage in bytes. + /// + [global::Newtonsoft.Json.JsonProperty("usage_bytes", Required = global::Newtonsoft.Json.Required.Always)] + public int UsageBytes { get; set; } = default!; + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::Newtonsoft.Json.JsonProperty("project_id")] + public string? ProjectId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::Newtonsoft.Json.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The vector stores usage in bytes. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + public UsageVectorStoresResult( + int usageBytes, + global::G.UsageVectorStoresResultObject @object, + string? projectId) + { + this.UsageBytes = usageBytes; + this.Object = @object; + this.ProjectId = projectId; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageVectorStoresResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResultObject.g.verified.cs new file mode 100644 index 0000000000..d97aa40bec --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.UsageVectorStoresResultObject.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.UsageVectorStoresResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + [global::System.Runtime.Serialization.DataContract] + public enum UsageVectorStoresResultObject + { + /// + /// + /// + [global::System.Runtime.Serialization.EnumMember(Value="organization.usage.vector_stores.result")] + OrganizationUsageVectorStoresResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageVectorStoresResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageVectorStoresResultObject value) + { + return value switch + { + UsageVectorStoresResultObject.OrganizationUsageVectorStoresResult => "organization.usage.vector_stores.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageVectorStoresResultObject? 
ToEnum(string value) + { + return value switch + { + "organization.usage.vector_stores.result" => UsageVectorStoresResultObject.OrganizationUsageVectorStoresResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.VectorStoreObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.VectorStoreObject.g.verified.cs index c911ad8a61..dc3cd34024 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.VectorStoreObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.VectorStoreObject.g.verified.cs @@ -70,7 +70,7 @@ public sealed partial class VectorStoreObject public global::System.DateTimeOffset? LastActiveAt { get; set; } = default!; /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::Newtonsoft.Json.JsonProperty("metadata", Required = global::Newtonsoft.Json.Required.Always)] public object? Metadata { get; set; } = default!; @@ -113,7 +113,7 @@ public sealed partial class VectorStoreObject /// The Unix timestamp (in seconds) for when the vector store was last active. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public VectorStoreObject( string id, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.VectorStoreObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.VectorStoreObjectMetadata.g.verified.cs index fa2d1a1933..a9ca4618b4 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.VectorStoreObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.Models.VectorStoreObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class VectorStoreObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModelsClient.RetrieveModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModelsClient.RetrieveModel.g.verified.cs index 5ad141530a..88efc72128 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModelsClient.RetrieveModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModelsClient.RetrieveModel.g.verified.cs @@ -30,7 +30,7 @@ partial void ProcessRetrieveModelResponseContent( /// /// The token to cancel the operation with /// - public async global::System.Threading.Tasks.Task RetrieveModelAsync( + public async global::System.Threading.Tasks.Task RetrieveModelAsync( string model, global::System.Threading.CancellationToken cancellationToken = default) { @@ -121,7 +121,7 @@ partial void ProcessRetrieveModelResponseContent( } return - global::G.Model12.FromJson(__content, JsonSerializerOptions) ?? + global::G.Model15.FromJson(__content, JsonSerializerOptions) ?? throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); } else @@ -147,7 +147,7 @@ partial void ProcessRetrieveModelResponseContent( using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); return - await global::G.Model12.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + await global::G.Model15.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? throw new global::System.InvalidOperationException("Response deserialization failed."); } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModerationsClient.CreateModeration.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModerationsClient.CreateModeration.g.verified.cs index 60b193a17b..0e5d32fcb1 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModerationsClient.CreateModeration.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModerationsClient.CreateModeration.g.verified.cs @@ -23,7 +23,8 @@ partial void ProcessCreateModerationResponseContent( ref string content); /// - /// Classifies if text is potentially harmful. + /// Classifies if text and/or image inputs are potentially harmful. Learn
+ /// more in the [moderation guide](/docs/guides/moderation). ///
/// /// The token to cancel the operation with @@ -159,21 +160,24 @@ partial void ProcessCreateModerationResponseContent( } /// - /// Classifies if text is potentially harmful. + /// Classifies if text and/or image inputs are potentially harmful. Learn
+ /// more in the [moderation guide](/docs/guides/moderation). ///
/// - /// The input text to classify + /// Input (or inputs) to classify. Can be a single string, an array of strings, or
+ /// an array of multi-modal input objects similar to other models. /// /// - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
- /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- /// Default Value: text-moderation-latest
- /// Example: text-moderation-stable + /// The content moderation model you would like to use. Learn more in
+ /// [the moderation guide](/docs/guides/moderation), and learn about
+ /// available models [here](/docs/models#moderation).
+ /// Default Value: omni-moderation-latest
+ /// Example: omni-moderation-2024-09-26 /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task CreateModerationAsync( - global::G.OneOf> input, + global::G.OneOf, global::System.Collections.Generic.IList> input, global::G.AnyOf? model = default, global::System.Threading.CancellationToken cancellationToken = default) { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModerationsClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModerationsClient.g.verified.cs index d7c86ad102..69a307c849 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModerationsClient.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ModerationsClient.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Given a input text, outputs if the model classifies it as potentially harmful.
+ /// Given text and/or image inputs, classifies if those inputs are potentially harmful.
/// If no httpClient is provided, a new one will be created.
/// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. ///
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.OpenAiClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.OpenAiClient.g.verified.cs index 0bc206d9ec..e45ba65038 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.OpenAiClient.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.OpenAiClient.g.verified.cs @@ -138,7 +138,7 @@ public sealed partial class OpenAiClient : global::G.IOpenAiClient, global::Syst }; /// - /// Given a input text, outputs if the model classifies it as potentially harmful. + /// Given text and/or image inputs, classifies if those inputs are potentially harmful. /// public ModerationsClient Moderations => new ModerationsClient(HttpClient, authorizations: Authorizations) { @@ -158,7 +158,7 @@ public sealed partial class OpenAiClient : global::G.IOpenAiClient, global::Syst /// /// /// - public VectorStoresClient VectorStores => new VectorStoresClient(HttpClient, authorizations: Authorizations) + public UsageClient Usage => new UsageClient(HttpClient, authorizations: Authorizations) { ReadResponseAsString = ReadResponseAsString, JsonSerializerOptions = JsonSerializerOptions, @@ -173,6 +173,15 @@ public sealed partial class OpenAiClient : global::G.IOpenAiClient, global::Syst JsonSerializerOptions = JsonSerializerOptions, }; + /// + /// + /// + public ProjectsClient Projects => new ProjectsClient(HttpClient, authorizations: Authorizations) + { + ReadResponseAsString = ReadResponseAsString, + JsonSerializerOptions = JsonSerializerOptions, + }; + /// /// /// @@ -185,7 +194,7 @@ public sealed partial class OpenAiClient : global::G.IOpenAiClient, global::Syst /// /// /// - public ProjectsClient Projects => new ProjectsClient(HttpClient, authorizations: Authorizations) + public VectorStoresClient VectorStores => new VectorStoresClient(HttpClient, authorizations: Authorizations) { ReadResponseAsString = ReadResponseAsString, JsonSerializerOptions = JsonSerializerOptions, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ListProjectRateLimits.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ListProjectRateLimits.g.verified.cs new file mode 100644 index 0000000000..7ab7bfda9e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ListProjectRateLimits.g.verified.cs @@ -0,0 +1,178 @@ +//HintName: G.ProjectsClient.ListProjectRateLimits.g.cs + +#nullable enable + +namespace G +{ + public partial class ProjectsClient + { + partial void PrepareListProjectRateLimitsArguments( + global::System.Net.Http.HttpClient httpClient, + ref string projectId, + ref int? limit, + ref string? after, + ref string? before); + partial void PrepareListProjectRateLimitsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, + int? limit, + string? after, + string? before); + partial void ProcessListProjectRateLimitsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessListProjectRateLimitsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Returns the rate limits per model for a project. 
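A hedged usage sketch for the new rate-limits listing operation added below; the client is assumed to be constructed and authorized elsewhere, the project id is a placeholder, and the paging cursor parameters are left at their defaults.

using System.Threading.Tasks;
using G;

internal static class ListRateLimitsExample
{
    internal static async Task RunAsync(OpenAiClient client)
    {
        // GET /organization/projects/{projectId}/rate_limits with an explicit page size.
        var page = await client.Projects.ListProjectRateLimitsAsync(
            projectId: "proj_example",
            limit: 25);

        System.Console.WriteLine(page);
    }
}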
+ /// + /// + /// + /// Default Value: 100 + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task ListProjectRateLimitsAsync( + string projectId, + int? limit = default, + string? after = default, + string? before = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareListProjectRateLimitsArguments( + httpClient: HttpClient, + projectId: ref projectId, + limit: ref limit, + after: ref after, + before: ref before); + + var __pathBuilder = new PathBuilder( + path: $"/organization/projects/{projectId}/rate_limits", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("after", after) + .AddOptionalParameter("before", before) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareListProjectRateLimitsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + projectId: projectId, + limit: limit, + after: after, + before: before); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessListProjectRateLimitsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessListProjectRateLimitsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.ProjectRateLimitListResponse.FromJson(__content, JsonSerializerOptions) ?? 
+ throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.ProjectRateLimitListResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ModifyProject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ModifyProject.g.verified.cs index 11bd8dcf28..ca784687a2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ModifyProject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ModifyProject.g.verified.cs @@ -8,10 +8,12 @@ public partial class ProjectsClient { partial void PrepareModifyProjectArguments( global::System.Net.Http.HttpClient httpClient, + ref string projectId, global::G.ProjectUpdateRequest request); partial void PrepareModifyProjectRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, global::G.ProjectUpdateRequest request); partial void ProcessModifyProjectResponse( global::System.Net.Http.HttpClient httpClient, @@ -25,10 +27,12 @@ partial void ProcessModifyProjectResponseContent( /// /// Modifies a project in the organization. /// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, global::G.ProjectUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -38,10 +42,11 @@ partial void ProcessModifyProjectResponseContent( client: HttpClient); PrepareModifyProjectArguments( httpClient: HttpClient, + projectId: ref projectId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/projects/{project_id}", + path: $"/organization/projects/{projectId}", baseUri: HttpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -80,6 +85,7 @@ partial void ProcessModifyProjectResponseContent( PrepareModifyProjectRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, + projectId: projectId, request: request); using var __response = await HttpClient.SendAsync( @@ -189,12 +195,14 @@ partial void ProcessModifyProjectResponseContent( /// /// Modifies a project in the organization. /// + /// /// /// The updated name of the project, this name appears in reports. 
/// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, string name, global::System.Threading.CancellationToken cancellationToken = default) { @@ -204,6 +212,7 @@ partial void ProcessModifyProjectResponseContent( }; return await ModifyProjectAsync( + projectId: projectId, request: __request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ModifyProjectUser.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ModifyProjectUser.g.verified.cs index 2b2dda94ff..1117bd0f79 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ModifyProjectUser.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.ModifyProjectUser.g.verified.cs @@ -8,10 +8,14 @@ public partial class ProjectsClient { partial void PrepareModifyProjectUserArguments( global::System.Net.Http.HttpClient httpClient, + ref string projectId, + ref string userId, global::G.ProjectUserUpdateRequest request); partial void PrepareModifyProjectUserRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, + string userId, global::G.ProjectUserUpdateRequest request); partial void ProcessModifyProjectUserResponse( global::System.Net.Http.HttpClient httpClient, @@ -25,10 +29,14 @@ partial void ProcessModifyProjectUserResponseContent( /// /// Modifies a user's role in the project. /// + /// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::G.ProjectUserUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -38,10 +46,12 @@ partial void ProcessModifyProjectUserResponseContent( client: HttpClient); PrepareModifyProjectUserArguments( httpClient: HttpClient, + projectId: ref projectId, + userId: ref userId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/projects/{project_id}/users/{user_id}", + path: $"/organization/projects/{projectId}/users/{userId}", baseUri: HttpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -80,6 +90,8 @@ partial void ProcessModifyProjectUserResponseContent( PrepareModifyProjectUserRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, + projectId: projectId, + userId: userId, request: request); using var __response = await HttpClient.SendAsync( @@ -189,12 +201,16 @@ partial void ProcessModifyProjectUserResponseContent( /// /// Modifies a user's role in the project. 
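// ---------------------------------------------------------------------------
// Editor-added sketch: ModifyProjectAsync and ModifyProjectUserAsync now take
// the ids that fill their "/organization/projects/..." paths explicitly, so a
// call looks roughly like the following. `api` is an assumed, pre-configured
// G.OpenAiClient; the ids are placeholders and the enum member name is
// inferred from the `owner`/`member` roles the endpoint accepts.
await api.Projects.ModifyProjectUserAsync(
    projectId: "proj_abc123",
    userId: "user_def456",
    role: G.ProjectUserUpdateRequestRole.Member);
// ---------------------------------------------------------------------------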
/// + /// + /// /// /// `owner` or `member` /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::G.ProjectUserUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default) { @@ -204,6 +220,8 @@ partial void ProcessModifyProjectUserResponseContent( }; return await ModifyProjectUserAsync( + projectId: projectId, + userId: userId, request: __request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.UpdateProjectRateLimits.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.UpdateProjectRateLimits.g.verified.cs new file mode 100644 index 0000000000..9606e6f207 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.ProjectsClient.UpdateProjectRateLimits.g.verified.cs @@ -0,0 +1,254 @@ +//HintName: G.ProjectsClient.UpdateProjectRateLimits.g.cs + +#nullable enable + +namespace G +{ + public partial class ProjectsClient + { + partial void PrepareUpdateProjectRateLimitsArguments( + global::System.Net.Http.HttpClient httpClient, + ref string projectId, + ref string rateLimitId, + global::G.ProjectRateLimitUpdateRequest request); + partial void PrepareUpdateProjectRateLimitsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, + string rateLimitId, + global::G.ProjectRateLimitUpdateRequest request); + partial void ProcessUpdateProjectRateLimitsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUpdateProjectRateLimitsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Updates a project rate limit. + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UpdateProjectRateLimitsAsync( + string projectId, + string rateLimitId, + global::G.ProjectRateLimitUpdateRequest request, + global::System.Threading.CancellationToken cancellationToken = default) + { + request = request ?? 
throw new global::System.ArgumentNullException(nameof(request)); + + PrepareArguments( + client: HttpClient); + PrepareUpdateProjectRateLimitsArguments( + httpClient: HttpClient, + projectId: ref projectId, + rateLimitId: ref rateLimitId, + request: request); + + var __pathBuilder = new PathBuilder( + path: $"/organization/projects/{projectId}/rate_limits/{rateLimitId}", + baseUri: HttpClient.BaseAddress); + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Post, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + var __httpRequestContentBody = request.ToJson(JsonSerializerOptions); + var __httpRequestContent = new global::System.Net.Http.StringContent( + content: __httpRequestContentBody, + encoding: global::System.Text.Encoding.UTF8, + mediaType: "application/json"); + __httpRequest.Content = __httpRequestContent; + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUpdateProjectRateLimitsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + projectId: projectId, + rateLimitId: rateLimitId, + request: request); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUpdateProjectRateLimitsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + // Error response for various conditions. + if ((int)__response.StatusCode == 400) + { + string? __content_400 = null; + global::G.ErrorResponse? __value_400 = null; + if (ReadResponseAsString) + { + __content_400 = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + __value_400 = global::G.ErrorResponse.FromJson(__content_400, JsonSerializerOptions); + } + else + { + var __contentStream_400 = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + __value_400 = await global::G.ErrorResponse.FromJsonStreamAsync(__contentStream_400, JsonSerializerOptions).ConfigureAwait(false); + } + + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? 
string.Empty, + statusCode: __response.StatusCode) + { + ResponseBody = __content_400, + ResponseObject = __value_400, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUpdateProjectRateLimitsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.ProjectRateLimit.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.ProjectRateLimit.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + + /// + /// Updates a project rate limit. + /// + /// + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only relevant for certain models. + /// + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + /// + /// The maximum requests per day. Only relevant for certain models. + /// + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UpdateProjectRateLimitsAsync( + string projectId, + string rateLimitId, + int? maxRequestsPer1Minute = default, + int? maxTokensPer1Minute = default, + int? maxImagesPer1Minute = default, + int? maxAudioMegabytesPer1Minute = default, + int? maxRequestsPer1Day = default, + int? 
batch1DayMaxInputTokens = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + var __request = new global::G.ProjectRateLimitUpdateRequest + { + MaxRequestsPer1Minute = maxRequestsPer1Minute, + MaxTokensPer1Minute = maxTokensPer1Minute, + MaxImagesPer1Minute = maxImagesPer1Minute, + MaxAudioMegabytesPer1Minute = maxAudioMegabytesPer1Minute, + MaxRequestsPer1Day = maxRequestsPer1Day, + Batch1DayMaxInputTokens = batch1DayMaxInputTokens, + }; + + return await UpdateProjectRateLimitsAsync( + projectId: projectId, + rateLimitId: rateLimitId, + request: __request, + cancellationToken: cancellationToken).ConfigureAwait(false); + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UploadsClient.CreateUpload.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UploadsClient.CreateUpload.g.verified.cs index 32ea68e062..3a1880c828 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UploadsClient.CreateUpload.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UploadsClient.CreateUpload.g.verified.cs @@ -26,7 +26,7 @@ partial void ProcessCreateUploadResponseContent( /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// @@ -166,7 +166,7 @@ partial void ProcessCreateUploadResponseContent( /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageAudioSpeeches.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageAudioSpeeches.g.verified.cs new file mode 100644 index 0000000000..07f334c247 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageAudioSpeeches.g.verified.cs @@ -0,0 +1,227 @@ +//HintName: G.UsageClient.UsageAudioSpeeches.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageAudioSpeechesArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageAudioSpeechesBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageAudioSpeechesRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageAudioSpeechesBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageAudioSpeechesResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageAudioSpeechesResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get audio speeches usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageAudioSpeechesAsync( + int startTime, + int? endTime = default, + global::G.UsageAudioSpeechesBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageAudioSpeechesArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageAudioSpeechesBucketWidth.x1m => "1m", + global::G.UsageAudioSpeechesBucketWidth.x1h => "1h", + global::G.UsageAudioSpeechesBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/usage/audio_speeches", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageAudioSpeechesRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageAudioSpeechesResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageAudioSpeechesResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + 
__response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageAudioTranscriptions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageAudioTranscriptions.g.verified.cs new file mode 100644 index 0000000000..f178ede135 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageAudioTranscriptions.g.verified.cs @@ -0,0 +1,227 @@ +//HintName: G.UsageClient.UsageAudioTranscriptions.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageAudioTranscriptionsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageAudioTranscriptionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageAudioTranscriptionsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageAudioTranscriptionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageAudioTranscriptionsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageAudioTranscriptionsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get audio transcriptions usage details for the organization. 
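// ---------------------------------------------------------------------------
// Editor-added sketch of the shared shape of the new Usage endpoints: a
// required Unix start_time plus optional filters, bucket width, and cursor
// paging. `api` is an assumed, pre-configured G.OpenAiClient with an
// organization admin key; the statements are assumed to run inside an async
// method.
var since = (int)global::System.DateTimeOffset.UtcNow.AddDays(-7).ToUnixTimeSeconds();
var buckets = await api.Usage.UsageAudioTranscriptionsAsync(
    startTime: since,
    bucketWidth: G.UsageAudioTranscriptionsBucketWidth.x1d, // "1d" buckets, the documented default
    limit: 7);
// `buckets` is the deserialized UsageResponse returned by the generated method.
// ---------------------------------------------------------------------------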
+ /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageAudioTranscriptionsAsync( + int startTime, + int? endTime = default, + global::G.UsageAudioTranscriptionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageAudioTranscriptionsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageAudioTranscriptionsBucketWidth.x1m => "1m", + global::G.UsageAudioTranscriptionsBucketWidth.x1h => "1h", + global::G.UsageAudioTranscriptionsBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/usage/audio_transcriptions", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageAudioTranscriptionsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await 
HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageAudioTranscriptionsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageAudioTranscriptionsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCodeInterpreterSessions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCodeInterpreterSessions.g.verified.cs new file mode 100644 index 0000000000..d09518b6c6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCodeInterpreterSessions.g.verified.cs @@ -0,0 +1,206 @@ +//HintName: G.UsageClient.UsageCodeInterpreterSessions.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageCodeInterpreterSessionsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageCodeInterpreterSessionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageCodeInterpreterSessionsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageCodeInterpreterSessionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? 
page); + partial void ProcessUsageCodeInterpreterSessionsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageCodeInterpreterSessionsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get code interpreter sessions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageCodeInterpreterSessionsAsync( + int startTime, + int? endTime = default, + global::G.UsageCodeInterpreterSessionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageCodeInterpreterSessionsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageCodeInterpreterSessionsBucketWidth.x1m => "1m", + global::G.UsageCodeInterpreterSessionsBucketWidth.x1h => "1h", + global::G.UsageCodeInterpreterSessionsBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/usage/code_interpreter_sessions", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageCodeInterpreterSessionsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + 
completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageCodeInterpreterSessionsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageCodeInterpreterSessionsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCompletions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCompletions.g.verified.cs new file mode 100644 index 0000000000..46db67694f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCompletions.g.verified.cs @@ -0,0 +1,234 @@ +//HintName: G.UsageClient.UsageCompletions.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageCompletionsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageCompletionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + ref bool? batch, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageCompletionsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageCompletionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? 
userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + bool? batch, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageCompletionsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageCompletionsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get completions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageCompletionsAsync( + int startTime, + int? endTime = default, + global::G.UsageCompletionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + bool? batch = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageCompletionsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + batch: ref batch, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageCompletionsBucketWidth.x1m => "1m", + global::G.UsageCompletionsBucketWidth.x1h => "1h", + global::G.UsageCompletionsBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/usage/completions", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("batch", batch?.ToString()) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new 
global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageCompletionsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + batch: batch, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageCompletionsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageCompletionsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCosts.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCosts.g.verified.cs new file mode 100644 index 0000000000..53d11eae1a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageCosts.g.verified.cs @@ -0,0 +1,206 @@ +//HintName: G.UsageClient.UsageCosts.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageCostsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageCostsBucketWidth? 
bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageCostsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageCostsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageCostsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageCostsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get costs details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// Default Value: 7 + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageCostsAsync( + int startTime, + int? endTime = default, + global::G.UsageCostsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageCostsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageCostsBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/costs", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageCostsRequest( + httpClient: HttpClient, + httpRequestMessage: 
__httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageCostsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageCostsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageEmbeddings.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageEmbeddings.g.verified.cs new file mode 100644 index 0000000000..ceaa11b175 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageEmbeddings.g.verified.cs @@ -0,0 +1,227 @@ +//HintName: G.UsageClient.UsageEmbeddings.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageEmbeddingsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageEmbeddingsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageEmbeddingsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? 
endTime, + global::G.UsageEmbeddingsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageEmbeddingsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageEmbeddingsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get embeddings usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageEmbeddingsAsync( + int startTime, + int? endTime = default, + global::G.UsageEmbeddingsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageEmbeddingsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageEmbeddingsBucketWidth.x1m => "1m", + global::G.UsageEmbeddingsBucketWidth.x1h => "1h", + global::G.UsageEmbeddingsBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/usage/embeddings", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new 
global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageEmbeddingsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageEmbeddingsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageEmbeddingsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageImages.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageImages.g.verified.cs new file mode 100644 index 0000000000..6b52e363c0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageImages.g.verified.cs @@ -0,0 +1,239 @@ +//HintName: G.UsageClient.UsageImages.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageImagesArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageImagesBucketWidth? 
bucketWidth, + global::System.Collections.Generic.IList? sources, + global::System.Collections.Generic.IList? sizes, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageImagesRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageImagesBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? sources, + global::System.Collections.Generic.IList? sizes, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageImagesResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageImagesResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get images usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageImagesAsync( + int startTime, + int? endTime = default, + global::G.UsageImagesBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? sources = default, + global::System.Collections.Generic.IList? sizes = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageImagesArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + sources: sources, + sizes: sizes, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageImagesBucketWidth.x1m => "1m", + global::G.UsageImagesBucketWidth.x1h => "1h", + global::G.UsageImagesBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/usage/images", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageImagesRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + sources: sources, + sizes: sizes, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageImagesResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageImagesResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + 
+ try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageModerations.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageModerations.g.verified.cs new file mode 100644 index 0000000000..3b3188b77b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageModerations.g.verified.cs @@ -0,0 +1,227 @@ +//HintName: G.UsageClient.UsageModerations.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageModerationsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageModerationsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageModerationsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageModerationsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageModerationsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageModerationsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get moderations usage details for the organization. 
+ /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageModerationsAsync( + int startTime, + int? endTime = default, + global::G.UsageModerationsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageModerationsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageModerationsBucketWidth.x1m => "1m", + global::G.UsageModerationsBucketWidth.x1h => "1h", + global::G.UsageModerationsBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/usage/moderations", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageModerationsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + 
completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageModerationsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageModerationsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageVectorStores.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageVectorStores.g.verified.cs new file mode 100644 index 0000000000..567f57fa0d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.UsageVectorStores.g.verified.cs @@ -0,0 +1,206 @@ +//HintName: G.UsageClient.UsageVectorStores.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageVectorStoresArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageVectorStoresBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageVectorStoresRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageVectorStoresBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? 
page); + partial void ProcessUsageVectorStoresResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageVectorStoresResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get vector stores usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageVectorStoresAsync( + int startTime, + int? endTime = default, + global::G.UsageVectorStoresBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageVectorStoresArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var bucketWidthValue = bucketWidth switch + { + global::G.UsageVectorStoresBucketWidth.x1m => "1m", + global::G.UsageVectorStoresBucketWidth.x1h => "1h", + global::G.UsageVectorStoresBucketWidth.x1d => "1d", + _ => throw new global::System.NotImplementedException("Enum value not implemented."), + }; + var __pathBuilder = new PathBuilder( + path: "/organization/usage/vector_stores", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidthValue?.ToString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageVectorStoresRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: 
cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageVectorStoresResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync().ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageVectorStoresResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerOptions) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync().ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.g.verified.cs new file mode 100644 index 0000000000..2927d75261 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsageClient.g.verified.cs @@ -0,0 +1,87 @@ +//HintName: G.UsageClient.g.cs + +#nullable enable + +namespace G +{ + /// + /// If no httpClient is provided, a new one will be created.
+ /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + /// </summary>
+ public sealed partial class UsageClient : global::G.IUsageClient, global::System.IDisposable + { + /// + /// + /// + public const string DefaultBaseUrl = "https://api.openai.com/v1"; + + private bool _disposeHttpClient = true; + + /// + public global::System.Net.Http.HttpClient HttpClient { get; } + + /// + public System.Uri? BaseUri => HttpClient.BaseAddress; + + /// + public global::System.Collections.Generic.List Authorizations { get; } + + /// + public bool ReadResponseAsString { get; set; } +#if DEBUG + = true; +#endif + /// + /// + /// + public global::Newtonsoft.Json.JsonSerializerSettings JsonSerializerOptions { get; set; } = new global::Newtonsoft.Json.JsonSerializerSettings(); + + + /// + /// Creates a new instance of the UsageClient. + /// If no httpClient is provided, a new one will be created. + /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + /// + /// The HttpClient instance. If not provided, a new one will be created. + /// The base URL for the API. If not provided, the default baseUri from OpenAPI spec will be used. + /// The authorizations to use for the requests. + /// Dispose the HttpClient when the instance is disposed. True by default. + public UsageClient( + global::System.Net.Http.HttpClient? httpClient = null, + global::System.Uri? baseUri = null, + global::System.Collections.Generic.List? authorizations = null, + bool disposeHttpClient = true) + { + HttpClient = httpClient ?? new global::System.Net.Http.HttpClient(); + HttpClient.BaseAddress ??= baseUri ?? new global::System.Uri(DefaultBaseUrl); + Authorizations = authorizations ?? new global::System.Collections.Generic.List(); + _disposeHttpClient = disposeHttpClient; + + Initialized(HttpClient); + } + + /// + public void Dispose() + { + if (_disposeHttpClient) + { + HttpClient.Dispose(); + } + } + + partial void Initialized( + global::System.Net.Http.HttpClient client); + partial void PrepareArguments( + global::System.Net.Http.HttpClient client); + partial void PrepareRequest( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpRequestMessage request); + partial void ProcessResponse( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response); + partial void ProcessResponseContent( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response, + ref string content); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsersClient.ModifyUser.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsersClient.ModifyUser.g.verified.cs index cebd5f4a0c..c69c2672f0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsersClient.ModifyUser.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.UsersClient.ModifyUser.g.verified.cs @@ -8,10 +8,12 @@ public partial class UsersClient { partial void PrepareModifyUserArguments( global::System.Net.Http.HttpClient httpClient, + ref string userId, global::G.UserRoleUpdateRequest request); partial void PrepareModifyUserRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string userId, global::G.UserRoleUpdateRequest request); partial void ProcessModifyUserResponse( global::System.Net.Http.HttpClient httpClient, @@ -25,10 +27,12 @@ partial void ProcessModifyUserResponseContent( /// /// Modifies a user's role in the 
organization. /// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::G.UserRoleUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -38,10 +42,11 @@ partial void ProcessModifyUserResponseContent( client: HttpClient); PrepareModifyUserArguments( httpClient: HttpClient, + userId: ref userId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/users/{user_id}", + path: $"/organization/users/{userId}", baseUri: HttpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -80,6 +85,7 @@ partial void ProcessModifyUserResponseContent( PrepareModifyUserRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, + userId: userId, request: request); using var __response = await HttpClient.SendAsync( @@ -161,12 +167,14 @@ partial void ProcessModifyUserResponseContent( /// /// Modifies a user's role in the organization. /// + /// /// /// `owner` or `reader` /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::G.UserRoleUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default) { @@ -176,6 +184,7 @@ partial void ProcessModifyUserResponseContent( }; return await ModifyUserAsync( + userId: userId, request: __request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.VectorStoresClient.CreateVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.VectorStoresClient.CreateVectorStore.g.verified.cs index be59935d50..c980476037 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.VectorStoresClient.CreateVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.VectorStoresClient.CreateVectorStore.g.verified.cs @@ -174,7 +174,7 @@ partial void ProcessCreateVectorStoreResponseContent( /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.VectorStoresClient.ModifyVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.VectorStoresClient.ModifyVectorStore.g.verified.cs index 2c73c2f824..cd1b7eaac3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.VectorStoresClient.ModifyVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#G.VectorStoresClient.ModifyVectorStore.g.verified.cs @@ -175,7 +175,7 @@ partial void ProcessModifyVectorStoreResponseContent( /// The expiration policy for a vector store. 
/// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#OneOf.3.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#OneOf.3.Json.g.verified.cs new file mode 100644 index 0000000000..f4bd1d7dba --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#OneOf.3.Json.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: OneOf.3.Json.g.cs +#nullable enable + +namespace G +{ + public readonly partial struct OneOf + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.SerializeObject( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.OneOf? FromJson( + string json, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + return global::Newtonsoft.Json.JsonConvert.DeserializeObject>( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask?> FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::Newtonsoft.Json.JsonSerializerSettings? jsonSerializerOptions = null) + { + using var streamReader = new global::System.IO.StreamReader(jsonStream); + using var jsonReader = new global::Newtonsoft.Json.JsonTextReader(streamReader); + var serializer = global::Newtonsoft.Json.JsonSerializer.Create(jsonSerializerOptions); + return new global::System.Threading.Tasks.ValueTask?>(serializer.Deserialize>(jsonReader)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#OneOf.3.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#OneOf.3.g.verified.cs new file mode 100644 index 0000000000..e4627d03aa --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/NewtonsoftJson/_#OneOf.3.g.verified.cs @@ -0,0 +1,265 @@ +//HintName: OneOf.3.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public readonly partial struct OneOf : global::System.IEquatable> + { + /// + /// + /// +#if NET6_0_OR_GREATER + public T1? Value1 { get; init; } +#else + public T1? Value1 { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Value1))] +#endif + public bool IsValue1 => Value1 != null; + + /// + /// + /// + public static implicit operator OneOf(T1 value) => new OneOf(value); + + /// + /// + /// + public static implicit operator T1?(OneOf @this) => @this.Value1; + + /// + /// + /// + public OneOf(T1? value) + { + Value1 = value; + } + + /// + /// + /// +#if NET6_0_OR_GREATER + public T2? Value2 { get; init; } +#else + public T2? Value2 { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Value2))] +#endif + public bool IsValue2 => Value2 != null; + + /// + /// + /// + public static implicit operator OneOf(T2 value) => new OneOf(value); + + /// + /// + /// + public static implicit operator T2?(OneOf @this) => @this.Value2; + + /// + /// + /// + public OneOf(T2? value) + { + Value2 = value; + } + + /// + /// + /// +#if NET6_0_OR_GREATER + public T3? Value3 { get; init; } +#else + public T3? Value3 { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Value3))] +#endif + public bool IsValue3 => Value3 != null; + + /// + /// + /// + public static implicit operator OneOf(T3 value) => new OneOf(value); + + /// + /// + /// + public static implicit operator T3?(OneOf @this) => @this.Value3; + + /// + /// + /// + public OneOf(T3? value) + { + Value3 = value; + } + + /// + /// + /// + public OneOf( + T1? value1, + T2? value2, + T3? value3 + ) + { + Value1 = value1; + Value2 = value2; + Value3 = value3; + } + + /// + /// + /// + public object? Object => + Value3 as object ?? + Value2 as object ?? + Value1 as object + ; + + /// + /// + /// + public bool Validate() + { + return IsValue1 && !IsValue2 && !IsValue3 || !IsValue1 && IsValue2 && !IsValue3 || !IsValue1 && !IsValue2 && IsValue3; + } + + /// + /// + /// + public TResult? Match( + global::System.Func? value1 = null, + global::System.Func? value2 = null, + global::System.Func? 
value3 = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsValue1 && value1 != null) + { + return value1(Value1!); + } + else if (IsValue2 && value2 != null) + { + return value2(Value2!); + } + else if (IsValue3 && value3 != null) + { + return value3(Value3!); + } + + return default(TResult); + } + + /// + /// + /// + public void Match( + global::System.Action? value1 = null, + global::System.Action? value2 = null, + global::System.Action? value3 = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsValue1) + { + value1?.Invoke(Value1!); + } + else if (IsValue2) + { + value2?.Invoke(Value2!); + } + else if (IsValue3) + { + value3?.Invoke(Value3!); + } + } + + /// + /// + /// + public override int GetHashCode() + { + var fields = new object?[] + { + Value1, + typeof(T1), + Value2, + typeof(T2), + Value3, + typeof(T3), + }; + const int offset = unchecked((int)2166136261); + const int prime = 16777619; + static int HashCodeAggregator(int hashCode, object? value) => value == null + ? (hashCode ^ 0) * prime + : (hashCode ^ value.GetHashCode()) * prime; + + return global::System.Linq.Enumerable.Aggregate(fields, offset, HashCodeAggregator); + } + + /// + /// + /// + public bool Equals(OneOf other) + { + return + global::System.Collections.Generic.EqualityComparer.Default.Equals(Value1, other.Value1) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Value2, other.Value2) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Value3, other.Value3) + ; + } + + /// + /// + /// + public static bool operator ==(OneOf obj1, OneOf obj2) + { + return global::System.Collections.Generic.EqualityComparer>.Default.Equals(obj1, obj2); + } + + /// + /// + /// + public static bool operator !=(OneOf obj1, OneOf obj2) + { + return !(obj1 == obj2); + } + + /// + /// + /// + public override bool Equals(object? obj) + { + return obj is OneOf o && Equals(o); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateAssistant.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateAssistant.g.verified.cs index e557f0bbb4..61b97a69e8 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateAssistant.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateAssistant.g.verified.cs @@ -163,7 +163,7 @@ partial void ProcessCreateAssistantResponseContent( /// Create an assistant with a model and instructions. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o /// /// @@ -176,28 +176,30 @@ partial void ProcessCreateAssistantResponseContent( /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateMessage.g.verified.cs index 2ff94ae037..2415397087 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateMessage.g.verified.cs @@ -179,7 +179,7 @@ partial void ProcessCreateMessageResponseContent( /// A list of files attached to the message, and the tools they should be added to. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateRun.g.verified.cs index 86cea1f63a..d4059b1732 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateRun.g.verified.cs @@ -9,11 +9,13 @@ public partial class AssistantsClient partial void PrepareCreateRunArguments( global::System.Net.Http.HttpClient httpClient, ref string threadId, + global::System.Collections.Generic.IList? include, global::G.CreateRunRequest request); partial void PrepareCreateRunRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, string threadId, + global::System.Collections.Generic.IList? include, global::G.CreateRunRequest request); partial void ProcessCreateRunResponse( global::System.Net.Http.HttpClient httpClient, @@ -28,6 +30,7 @@ partial void ProcessCreateRunResponseContent( /// Create a run. /// /// + /// /// /// The token to cancel the operation with /// @@ -35,6 +38,7 @@ partial void ProcessCreateRunResponseContent( public async global::System.Threading.Tasks.Task CreateRunAsync( string threadId, global::G.CreateRunRequest request, + global::System.Collections.Generic.IList? include = default, global::System.Threading.CancellationToken cancellationToken = default) { request = request ?? 
throw new global::System.ArgumentNullException(nameof(request)); @@ -44,6 +48,7 @@ partial void ProcessCreateRunResponseContent( PrepareCreateRunArguments( httpClient: HttpClient, threadId: ref threadId, + include: include, request: request); var __pathBuilder = new PathBuilder( @@ -87,6 +92,7 @@ partial void ProcessCreateRunResponseContent( httpClient: HttpClient, httpRequestMessage: __httpRequest, threadId: threadId, + include: include, request: request); using var __response = await HttpClient.SendAsync( @@ -169,6 +175,7 @@ partial void ProcessCreateRunResponseContent( /// Create a run. /// /// + /// /// /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. /// @@ -189,15 +196,16 @@ partial void ProcessCreateRunResponseContent( /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -221,12 +229,13 @@ partial void ProcessCreateRunResponseContent( /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -235,11 +244,12 @@ partial void ProcessCreateRunResponseContent( public async global::System.Threading.Tasks.Task CreateRunAsync( string threadId, string assistantId, + global::System.Collections.Generic.IList? include = default, global::G.AnyOf? model = default, string? instructions = default, string? additionalInstructions = default, global::System.Collections.Generic.IList? additionalMessages = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, object? metadata = default, double? temperature = default, double? topP = default, @@ -274,6 +284,7 @@ partial void ProcessCreateRunResponseContent( return await CreateRunAsync( threadId: threadId, + include: include, request: __request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateThread.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateThread.g.verified.cs index 112ce97ace..f4881f66e9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateThread.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateThread.g.verified.cs @@ -169,7 +169,7 @@ partial void ProcessCreateThreadResponseContent( /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateThreadAndRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateThreadAndRun.g.verified.cs index 8c2352c3b4..59955792b2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateThreadAndRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.CreateThreadAndRun.g.verified.cs @@ -180,15 +180,16 @@ partial void ProcessCreateThreadAndRunResponseContent( /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -212,12 +213,13 @@ partial void ProcessCreateThreadAndRunResponseContent( /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -228,7 +230,7 @@ partial void ProcessCreateThreadAndRunResponseContent( global::G.CreateThreadRequest? thread = default, global::G.AnyOf? model = default, string? instructions = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, global::G.CreateThreadAndRunRequestToolResources? toolResources = default, object? metadata = default, double? temperature = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.GetRunStep.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.GetRunStep.g.verified.cs index d7e5c1bde8..8955c2a357 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.GetRunStep.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.GetRunStep.g.verified.cs @@ -10,13 +10,15 @@ partial void PrepareGetRunStepArguments( global::System.Net.Http.HttpClient httpClient, ref string threadId, ref string runId, - ref string stepId); + ref string stepId, + global::System.Collections.Generic.IList? include); partial void PrepareGetRunStepRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, string threadId, string runId, - string stepId); + string stepId, + global::System.Collections.Generic.IList? include); partial void ProcessGetRunStepResponse( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpResponseMessage httpResponseMessage); @@ -32,6 +34,7 @@ partial void ProcessGetRunStepResponseContent( /// /// /// + /// /// The token to cancel the operation with /// [global::System.Diagnostics.CodeAnalysis.Experimental(diagnosticId: "G_BETA_001")] @@ -39,6 +42,7 @@ partial void ProcessGetRunStepResponseContent( string threadId, string runId, string stepId, + global::System.Collections.Generic.IList? 
include = default, global::System.Threading.CancellationToken cancellationToken = default) { PrepareArguments( @@ -47,7 +51,8 @@ partial void ProcessGetRunStepResponseContent( httpClient: HttpClient, threadId: ref threadId, runId: ref runId, - stepId: ref stepId); + stepId: ref stepId, + include: include); var __pathBuilder = new PathBuilder( path: $"/threads/{threadId}/runs/{runId}/steps/{stepId}", @@ -85,7 +90,8 @@ partial void ProcessGetRunStepResponseContent( httpRequestMessage: __httpRequest, threadId: threadId, runId: runId, - stepId: stepId); + stepId: stepId, + include: include); using var __response = await HttpClient.SendAsync( request: __httpRequest, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ListRunSteps.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ListRunSteps.g.verified.cs index d8f5743fde..aa439cecc9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ListRunSteps.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ListRunSteps.g.verified.cs @@ -13,7 +13,8 @@ partial void PrepareListRunStepsArguments( ref int? limit, ref global::G.ListRunStepsOrder? order, ref string? after, - ref string? before); + ref string? before, + global::System.Collections.Generic.IList? include); partial void PrepareListRunStepsRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, @@ -22,7 +23,8 @@ partial void PrepareListRunStepsRequest( int? limit, global::G.ListRunStepsOrder? order, string? after, - string? before); + string? before, + global::System.Collections.Generic.IList? include); partial void ProcessListRunStepsResponse( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpResponseMessage httpResponseMessage); @@ -45,6 +47,7 @@ partial void ProcessListRunStepsResponseContent( /// /// /// + /// /// The token to cancel the operation with /// [global::System.Diagnostics.CodeAnalysis.Experimental(diagnosticId: "G_BETA_001")] @@ -55,6 +58,7 @@ partial void ProcessListRunStepsResponseContent( global::G.ListRunStepsOrder? order = default, string? after = default, string? before = default, + global::System.Collections.Generic.IList? 
include = default, global::System.Threading.CancellationToken cancellationToken = default) { PrepareArguments( @@ -66,7 +70,8 @@ partial void ProcessListRunStepsResponseContent( limit: ref limit, order: ref order, after: ref after, - before: ref before); + before: ref before, + include: include); var __pathBuilder = new PathBuilder( path: $"/threads/{threadId}/runs/{runId}/steps", @@ -113,7 +118,8 @@ partial void ProcessListRunStepsResponseContent( limit: limit, order: order, after: after, - before: before); + before: before, + include: include); using var __response = await HttpClient.SendAsync( request: __httpRequest, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyAssistant.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyAssistant.g.verified.cs index 1557e63f4f..ab7df37068 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyAssistant.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyAssistant.g.verified.cs @@ -170,7 +170,7 @@ partial void ProcessModifyAssistantResponseContent( /// /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The name of the assistant. The maximum length is 256 characters. @@ -182,28 +182,30 @@ partial void ProcessModifyAssistantResponseContent( /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -215,7 +217,7 @@ partial void ProcessModifyAssistantResponseContent( string? name = default, string? description = default, string? instructions = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, global::G.ModifyAssistantRequestToolResources? toolResources = default, object? metadata = default, double? temperature = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyMessage.g.verified.cs index 076a836c4d..29b3523554 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyMessage.g.verified.cs @@ -177,7 +177,7 @@ partial void ProcessModifyMessageResponseContent( /// /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyRun.g.verified.cs index d0ca3967eb..99424df940 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyRun.g.verified.cs @@ -177,7 +177,7 @@ partial void ProcessModifyRunResponseContent( /// /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
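The metadata limits repeated in these descriptions (at most 16 pairs, 64-character keys, 512-character values) can be checked client-side before a request is sent; a small illustrative helper:

```csharp
using System;
using System.Collections.Generic;

static class MetadataLimits
{
    // Limits as stated in the parameter descriptions above.
    private const int MaxPairs = 16;
    private const int MaxKeyLength = 64;
    private const int MaxValueLength = 512;

    public static void Validate(IReadOnlyDictionary<string, string> metadata)
    {
        if (metadata.Count > MaxPairs)
            throw new ArgumentException($"At most {MaxPairs} metadata pairs are allowed.");

        foreach (var (key, value) in metadata)
        {
            if (key.Length > MaxKeyLength)
                throw new ArgumentException($"Key '{key}' exceeds {MaxKeyLength} characters.");
            if (value.Length > MaxValueLength)
                throw new ArgumentException($"Value for key '{key}' exceeds {MaxValueLength} characters.");
        }
    }
}
```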
/// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyThread.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyThread.g.verified.cs index 3a405dd599..1386c69a57 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyThread.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AssistantsClient.ModifyThread.g.verified.cs @@ -173,7 +173,7 @@ partial void ProcessModifyThreadResponseContent( /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateSpeech.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateSpeech.g.verified.cs index ed6629272f..55f246aa08 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateSpeech.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateSpeech.g.verified.cs @@ -153,13 +153,13 @@ partial void ProcessCreateSpeechResponseContent( /// Generates audio from the input text. /// /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// /// /// The text to generate audio for. The maximum length is 4096 characters. /// /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// /// /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
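A hedged sketch of the speech request described above, assuming the raw REST endpoint `POST /v1/audio/speech`; the chosen voice and output format are two of the documented options:

```csharp
using System;
using System.IO;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;

class SpeechSketch
{
    static async Task Main()
    {
        using var http = new HttpClient();
        http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
            "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

        var body = new
        {
            model = "tts-1",                 // one of the TTS models mentioned above
            voice = "alloy",                 // alloy, echo, fable, onyx, nova or shimmer
            input = "The quick brown fox jumped over the lazy dog.",
            response_format = "mp3"          // mp3, opus, aac, flac, wav or pcm
        };

        using var response = await http.PostAsync(
            "https://api.openai.com/v1/audio/speech",
            new StringContent(JsonSerializer.Serialize(body), Encoding.UTF8, "application/json"));
        response.EnsureSuccessStatusCode();

        // The endpoint returns raw audio bytes in the requested format.
        await File.WriteAllBytesAsync("speech.mp3", await response.Content.ReadAsByteArrayAsync());
    }
}
```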
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateTranscription.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateTranscription.g.verified.cs index 58cfddc5fe..901d0d7565 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateTranscription.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateTranscription.g.verified.cs @@ -208,10 +208,10 @@ partial void ProcessCreateTranscriptionResponseContent( /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -230,7 +230,7 @@ partial void ProcessCreateTranscriptionResponseContent( global::G.AnyOf model, string? language = default, string? prompt = default, - global::G.CreateTranscriptionRequestResponseFormat? responseFormat = default, + global::G.AudioResponseFormat? responseFormat = default, double? temperature = default, global::System.Collections.Generic.IList? timestampGranularities = default, global::System.Threading.CancellationToken cancellationToken = default) diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateTranslation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateTranslation.g.verified.cs index 35469908ae..7293b68005 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateTranslation.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.AudioClient.CreateTranslation.g.verified.cs @@ -84,7 +84,7 @@ partial void ProcessCreateTranslationResponseContent( if (request.ResponseFormat != default) { __httpRequestContent.Add( - content: new global::System.Net.Http.StringContent($"{request.ResponseFormat}"), + content: new global::System.Net.Http.StringContent($"{request.ResponseFormat?.ToValueString()}"), name: "response_format"); } if (request.Temperature != default) @@ -193,10 +193,10 @@ partial void ProcessCreateTranslationResponseContent( /// Example: whisper-1 /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -210,7 +210,7 @@ partial void ProcessCreateTranslationResponseContent( string filename, global::G.AnyOf model, string? prompt = default, - string? responseFormat = default, + global::G.AudioResponseFormat? responseFormat = default, double? temperature = default, global::System.Threading.CancellationToken cancellationToken = default) { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.BatchClient.CreateBatch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.BatchClient.CreateBatch.g.verified.cs index dfe3741bed..dc2976c669 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.BatchClient.CreateBatch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.BatchClient.CreateBatch.g.verified.cs @@ -164,7 +164,7 @@ partial void ProcessCreateBatchResponseContent( /// /// The ID of an uploaded file that contains requests for the new batch.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
- /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. /// /// /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ChatClient.CreateChatCompletion.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ChatClient.CreateChatCompletion.g.verified.cs index e0ac1a648b..5ccd4f181d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ChatClient.CreateChatCompletion.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ChatClient.CreateChatCompletion.g.verified.cs @@ -23,7 +23,9 @@ partial void ProcessCreateChatCompletionResponseContent( ref string content); /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// /// The token to cancel the operation with @@ -159,18 +161,32 @@ partial void ProcessCreateChatCompletionResponseContent( } /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + /// + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -184,24 +200,42 @@ partial void ProcessCreateChatCompletionResponseContent( /// /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. /// - /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
- /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// + /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). /// /// /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
/// Default Value: 1
/// Example: 1 /// + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + /// + /// + /// Configuration for a [Predicted Output](/docs/guides/predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + /// + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// @@ -211,10 +245,12 @@ partial void ProcessCreateChatCompletionResponseContent( /// /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto /// /// /// Up to 4 sequences where the API will stop generating further tokens. @@ -250,10 +286,11 @@ partial void ProcessCreateChatCompletionResponseContent( /// `none` is the default when no tools are present. `auto` is the default if tools are present. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with @@ -261,12 +298,17 @@ partial void ProcessCreateChatCompletionResponseContent( public async global::System.Threading.Tasks.Task CreateChatCompletionAsync( global::System.Collections.Generic.IList messages, global::G.AnyOf model, + bool? store = default, + global::System.Collections.Generic.Dictionary? metadata = default, double? frequencyPenalty = default, global::System.Collections.Generic.Dictionary? logitBias = default, bool? logprobs = default, int? topLogprobs = default, - int? maxTokens = default, + int? maxCompletionTokens = default, int? n = default, + global::System.Collections.Generic.IList? modalities = default, + global::G.PredictionContent? prediction = default, + global::G.CreateChatCompletionRequestAudio? audio = default, double? presencePenalty = default, global::G.ResponseFormat? responseFormat = default, int? seed = default, @@ -286,12 +328,17 @@ partial void ProcessCreateChatCompletionResponseContent( { Messages = messages, Model = model, + Store = store, + Metadata = metadata, FrequencyPenalty = frequencyPenalty, LogitBias = logitBias, Logprobs = logprobs, TopLogprobs = topLogprobs, - MaxTokens = maxTokens, + MaxCompletionTokens = maxCompletionTokens, N = n, + Modalities = modalities, + Prediction = prediction, + Audio = audio, PresencePenalty = presencePenalty, ResponseFormat = responseFormat, Seed = seed, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.CompletionsClient.CreateCompletion.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.CompletionsClient.CreateCompletion.g.verified.cs index d31b065e44..520bb4a302 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.CompletionsClient.CreateCompletion.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.CompletionsClient.CreateCompletion.g.verified.cs @@ -162,7 +162,7 @@ partial void ProcessCreateCompletionResponseContent( /// Creates a completion for the provided prompt and parameters. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
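A sketch tying several of the new request fields shown above together (`store`, `metadata`, `max_completion_tokens`, `service_tier`, `user`), assuming the raw REST endpoint `POST /v1/chat/completions`; all field values are illustrative:

```csharp
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;

class ChatCompletionNewFields
{
    static async Task Main()
    {
        using var http = new HttpClient();
        http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
            "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

        var body = new
        {
            model = "gpt-4o-mini",                                        // illustrative model id
            messages = new[] { new { role = "user", content = "Hello!" } },
            max_completion_tokens = 256,   // upper bound covering visible output and reasoning tokens
            store = true,                  // keep the completion for distillation/evals
            metadata = new Dictionary<string, string> { ["purpose"] = "docs-example" },
            service_tier = "auto",
            user = "user-1234"
        };

        using var response = await http.PostAsync(
            "https://api.openai.com/v1/chat/completions",
            new StringContent(JsonSerializer.Serialize(body), Encoding.UTF8, "application/json"));
        Console.WriteLine(await response.Content.ReadAsStringAsync());
    }
}
```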
@@ -181,7 +181,7 @@ partial void ProcessCreateCompletionResponseContent( /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -207,7 +207,7 @@ partial void ProcessCreateCompletionResponseContent( /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -242,7 +242,7 @@ partial void ProcessCreateCompletionResponseContent( /// Example: 1 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.EmbeddingsClient.CreateEmbedding.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.EmbeddingsClient.CreateEmbedding.g.verified.cs index f69ea01d4a..e4266ccfe6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.EmbeddingsClient.CreateEmbedding.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.EmbeddingsClient.CreateEmbedding.g.verified.cs @@ -166,7 +166,7 @@ partial void ProcessCreateEmbeddingResponseContent( /// Example: The quick brown fox jumped over the lazy dog /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small /// /// @@ -178,7 +178,7 @@ partial void ProcessCreateEmbeddingResponseContent( /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FilesClient.CreateFile.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FilesClient.CreateFile.g.verified.cs index 24017572e1..b8a8f94bdb 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FilesClient.CreateFile.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FilesClient.CreateFile.g.verified.cs @@ -26,7 +26,7 @@ partial void ProcessCreateFileResponseContent( /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.
- /// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
+ /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// @@ -169,7 +169,7 @@ partial void ProcessCreateFileResponseContent( /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.
- /// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
+ /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FilesClient.ListFiles.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FilesClient.ListFiles.g.verified.cs index e69fc56ee7..debe119958 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FilesClient.ListFiles.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FilesClient.ListFiles.g.verified.cs @@ -8,11 +8,17 @@ public partial class FilesClient { partial void PrepareListFilesArguments( global::System.Net.Http.HttpClient httpClient, - ref string? purpose); + ref string? purpose, + ref int? limit, + ref global::G.ListFilesOrder? order, + ref string? after); partial void PrepareListFilesRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, - string? purpose); + string? purpose, + int? limit, + global::G.ListFilesOrder? order, + string? after); partial void ProcessListFilesResponse( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpResponseMessage httpResponseMessage); @@ -23,26 +29,42 @@ partial void ProcessListFilesResponseContent( ref string content); /// - /// Returns a list of files that belong to the user's organization. + /// Returns a list of files. /// /// + /// + /// Default Value: 10000 + /// + /// + /// Default Value: desc + /// + /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ListFilesAsync( string? purpose = default, + int? limit = default, + global::G.ListFilesOrder? order = default, + string? after = default, global::System.Threading.CancellationToken cancellationToken = default) { PrepareArguments( client: HttpClient); PrepareListFilesArguments( httpClient: HttpClient, - purpose: ref purpose); + purpose: ref purpose, + limit: ref limit, + order: ref order, + after: ref after); var __pathBuilder = new PathBuilder( path: "/files", baseUri: HttpClient.BaseAddress); __pathBuilder .AddOptionalParameter("purpose", purpose) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("order", order?.ToValueString()) + .AddOptionalParameter("after", after) ; var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -75,7 +97,10 @@ partial void ProcessListFilesResponseContent( PrepareListFilesRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, - purpose: purpose); + purpose: purpose, + limit: limit, + order: order, + after: after); using var __response = await HttpClient.SendAsync( request: __httpRequest, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FineTuningClient.CreateFineTuningJob.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FineTuningClient.CreateFineTuningJob.g.verified.cs index 6fe985d9a4..a3ab885bd3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FineTuningClient.CreateFineTuningJob.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.FineTuningClient.CreateFineTuningJob.g.verified.cs @@ -167,7 +167,7 @@ partial void ProcessCreateFineTuningJobResponseContent( /// /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini /// /// @@ -182,7 +182,7 @@ partial void ProcessCreateFineTuningJobResponseContent( /// The hyperparameters used for the fine-tuning job. /// /// - /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateAssistant.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateAssistant.g.verified.cs index 959307295f..032d69cf9d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateAssistant.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateAssistant.g.verified.cs @@ -20,7 +20,7 @@ public partial interface IAssistantsClient /// Create an assistant with a model and instructions. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o /// /// @@ -33,28 +33,30 @@ public partial interface IAssistantsClient /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateMessage.g.verified.cs index 661d085810..e2f7803fd7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateMessage.g.verified.cs @@ -32,7 +32,7 @@ public partial interface IAssistantsClient /// A list of files attached to the message, and the tools they should be added to. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateRun.g.verified.cs index 314d02ee58..1797b4fea3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateRun.g.verified.cs @@ -9,6 +9,7 @@ public partial interface IAssistantsClient /// Create a run. /// /// + /// /// /// The token to cancel the operation with /// @@ -16,12 +17,14 @@ public partial interface IAssistantsClient global::System.Threading.Tasks.Task CreateRunAsync( string threadId, global::G.CreateRunRequest request, + global::System.Collections.Generic.IList? include = default, global::System.Threading.CancellationToken cancellationToken = default); /// /// Create a run. /// /// + /// /// /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. /// @@ -42,15 +45,16 @@ public partial interface IAssistantsClient /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -74,12 +78,13 @@ public partial interface IAssistantsClient /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -88,11 +93,12 @@ public partial interface IAssistantsClient global::System.Threading.Tasks.Task CreateRunAsync( string threadId, string assistantId, + global::System.Collections.Generic.IList? include = default, global::G.AnyOf? model = default, string? instructions = default, string? additionalInstructions = default, global::System.Collections.Generic.IList? additionalMessages = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, object? metadata = default, double? temperature = default, double? topP = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateThread.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateThread.g.verified.cs index b228b41386..f18044188b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateThread.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateThread.g.verified.cs @@ -26,7 +26,7 @@ public partial interface IAssistantsClient /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateThreadAndRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateThreadAndRun.g.verified.cs index 1779aa305c..b3c5d8fca5 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateThreadAndRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.CreateThreadAndRun.g.verified.cs @@ -37,15 +37,16 @@ public partial interface IAssistantsClient /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -69,12 +70,13 @@ public partial interface IAssistantsClient /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
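The `top_p` description above (nucleus sampling) is easier to see with numbers. Below is a toy, self-contained sketch, independent of the generated `G` client, of how a `top_p` cutoff keeps only the smallest set of tokens whose probabilities sum to at least `p`; the tokens and probabilities are invented for illustration.

```csharp
using System;
using System.Collections.Generic;
using System.Linq;

// Made-up next-token distribution, used only to illustrate the top_p cutoff.
var tokenProbabilities = new Dictionary<string, double>
{
    ["the"] = 0.40,
    ["a"] = 0.25,
    ["an"] = 0.15,
    ["this"] = 0.12,
    ["that"] = 0.08,
};

const double topP = 0.8; // keep tokens until 80% of the probability mass is covered

var nucleus = new List<string>();
double cumulative = 0;
foreach (var (token, probability) in tokenProbabilities.OrderByDescending(kv => kv.Value))
{
    nucleus.Add(token);
    cumulative += probability;
    if (cumulative >= topP) break;
}

// With top_p = 0.8 only "the", "a" and "an" remain candidates for sampling.
Console.WriteLine($"Nucleus: {string.Join(", ", nucleus)} (mass = {cumulative:0.00})");
```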
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -85,7 +87,7 @@ public partial interface IAssistantsClient global::G.CreateThreadRequest? thread = default, global::G.AnyOf? model = default, string? instructions = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, global::G.CreateThreadAndRunRequestToolResources? toolResources = default, object? metadata = default, double? temperature = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.GetRunStep.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.GetRunStep.g.verified.cs index 5abc27c12f..7adba78074 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.GetRunStep.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.GetRunStep.g.verified.cs @@ -11,6 +11,7 @@ public partial interface IAssistantsClient /// /// /// + /// /// The token to cancel the operation with /// [global::System.Diagnostics.CodeAnalysis.Experimental(diagnosticId: "G_BETA_001")] @@ -18,6 +19,7 @@ public partial interface IAssistantsClient string threadId, string runId, string stepId, + global::System.Collections.Generic.IList? include = default, global::System.Threading.CancellationToken cancellationToken = default); } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ListRunSteps.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ListRunSteps.g.verified.cs index ed44b222b6..396c225d2e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ListRunSteps.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ListRunSteps.g.verified.cs @@ -18,6 +18,7 @@ public partial interface IAssistantsClient /// /// /// + /// /// The token to cancel the operation with /// [global::System.Diagnostics.CodeAnalysis.Experimental(diagnosticId: "G_BETA_001")] @@ -28,6 +29,7 @@ public partial interface IAssistantsClient global::G.ListRunStepsOrder? order = default, string? after = default, string? before = default, + global::System.Collections.Generic.IList? 
include = default, global::System.Threading.CancellationToken cancellationToken = default); } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyAssistant.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyAssistant.g.verified.cs index 02af95a219..54fcb13c43 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyAssistant.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyAssistant.g.verified.cs @@ -23,7 +23,7 @@ public partial interface IAssistantsClient /// /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The name of the assistant. The maximum length is 256 characters. @@ -35,28 +35,30 @@ public partial interface IAssistantsClient /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// The token to cancel the operation with @@ -68,7 +70,7 @@ public partial interface IAssistantsClient string? name = default, string? description = default, string? instructions = default, - global::System.Collections.Generic.IList? tools = default, + global::System.Collections.Generic.IList? tools = default, global::G.ModifyAssistantRequestToolResources? toolResources = default, object? metadata = default, double? temperature = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyMessage.g.verified.cs index 495656c3c1..ebff922613 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyMessage.g.verified.cs @@ -26,7 +26,7 @@ public partial interface IAssistantsClient /// /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyRun.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyRun.g.verified.cs index 28fed01c2c..bd083e236e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyRun.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyRun.g.verified.cs @@ -26,7 +26,7 @@ public partial interface IAssistantsClient /// /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
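The `response_format` behaviour documented in the assistant hunks above comes down to two payload shapes. A minimal sketch, not using the generated `G` client, of what JSON mode versus Structured Outputs looks like on the wire; the `person` schema is a made-up example.

```csharp
using System;
using System.Text.Json;

// JSON mode: the reply is valid JSON, but you must still ask for JSON in a
// system or user message, as the "Important" note above warns.
var jsonMode = new { type = "json_object" };

// Structured Outputs: the reply must additionally conform to the supplied schema.
var structuredOutputs = new
{
    type = "json_schema",
    json_schema = new
    {
        name = "person", // made-up example schema
        strict = true,
        schema = new
        {
            type = "object",
            properties = new
            {
                name = new { type = "string" },
                age = new { type = "integer" }
            },
            required = new[] { "name", "age" },
            additionalProperties = false
        }
    }
};

Console.WriteLine(JsonSerializer.Serialize(jsonMode));
Console.WriteLine(JsonSerializer.Serialize(structuredOutputs, new JsonSerializerOptions { WriteIndented = true }));
```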
/// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyThread.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyThread.g.verified.cs index 0f98a208bf..96bd75e3b6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyThread.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAssistantsClient.ModifyThread.g.verified.cs @@ -26,7 +26,7 @@ public partial interface IAssistantsClient /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateSpeech.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateSpeech.g.verified.cs index d9eec0f9a3..1d8f35d201 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateSpeech.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateSpeech.g.verified.cs @@ -19,13 +19,13 @@ public partial interface IAudioClient /// Generates audio from the input text. /// /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// /// /// The text to generate audio for. The maximum length is 4096 characters. /// /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// /// /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
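For the speech endpoint documented just above, a hedged sketch of a raw HTTP call (the `/v1/audio/speech` path and snake_case body fields are assumptions consistent with the parameter docs; the generated `IAudioClient` is not used). It requests `mp3` output with the `alloy` voice and expects `OPENAI_API_KEY` in the environment.

```csharp
using System;
using System.IO;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;

using var http = new HttpClient { BaseAddress = new Uri("https://api.openai.com/v1/") };
http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
    "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

// Fields mirror the CreateSpeech parameters above: model (tts-1 or tts-1-hd),
// input text, voice, and response_format (mp3/opus/aac/flac/wav/pcm).
var body = JsonSerializer.Serialize(new
{
    model = "tts-1",
    input = "The quick brown fox jumped over the lazy dog.",
    voice = "alloy",
    response_format = "mp3"
});

using var response = await http.PostAsync(
    "audio/speech", new StringContent(body, Encoding.UTF8, "application/json"));
response.EnsureSuccessStatusCode();

await File.WriteAllBytesAsync("speech.mp3", await response.Content.ReadAsByteArrayAsync());
```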
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateTranscription.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateTranscription.g.verified.cs index 2dbc484983..a7ba0231fe 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateTranscription.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateTranscription.g.verified.cs @@ -32,10 +32,10 @@ public partial interface IAudioClient /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -54,7 +54,7 @@ public partial interface IAudioClient global::G.AnyOf model, string? language = default, string? prompt = default, - global::G.CreateTranscriptionRequestResponseFormat? responseFormat = default, + global::G.AudioResponseFormat? responseFormat = default, double? temperature = default, global::System.Collections.Generic.IList? timestampGranularities = default, global::System.Threading.CancellationToken cancellationToken = default); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateTranslation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateTranslation.g.verified.cs index 15a6423711..f564fb5835 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateTranslation.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IAudioClient.CreateTranslation.g.verified.cs @@ -29,10 +29,10 @@ public partial interface IAudioClient /// Example: whisper-1 /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -46,7 +46,7 @@ public partial interface IAudioClient string filename, global::G.AnyOf model, string? prompt = default, - string? responseFormat = default, + global::G.AudioResponseFormat? responseFormat = default, double? temperature = default, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IBatchClient.CreateBatch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IBatchClient.CreateBatch.g.verified.cs index 438904dc82..f91e31bcd4 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IBatchClient.CreateBatch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IBatchClient.CreateBatch.g.verified.cs @@ -21,7 +21,7 @@ public partial interface IBatchClient /// /// The ID of an uploaded file that contains requests for the new batch.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
- /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. /// /// /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IChatClient.CreateChatCompletion.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IChatClient.CreateChatCompletion.g.verified.cs index 40374c5b83..1208ddb1c7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IChatClient.CreateChatCompletion.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IChatClient.CreateChatCompletion.g.verified.cs @@ -6,7 +6,9 @@ namespace G public partial interface IChatClient { /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// /// The token to cancel the operation with @@ -16,18 +18,32 @@ public partial interface IChatClient global::System.Threading.CancellationToken cancellationToken = default); /// - /// Creates a model response for the given chat conversation. + /// Creates a model response for the given chat conversation. Learn more in the
+ /// [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),
+ /// and [audio](/docs/guides/audio) guides. ///
/// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + /// + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -41,24 +57,42 @@ public partial interface IChatClient /// /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. /// - /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
- /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// + /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). /// /// /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
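The chat hunk above replaces `maxTokens` with `maxCompletionTokens` and introduces `store`/`metadata`. A sketch of the equivalent wire-level request; the snake_case field names (`max_completion_tokens`, `store`, `metadata`) are assumed from the parameter docs rather than taken from this diff, and the generated `IChatClient` is bypassed.

```csharp
using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;

using var http = new HttpClient { BaseAddress = new Uri("https://api.openai.com/v1/") };
http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
    "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

var body = JsonSerializer.Serialize(new
{
    model = "gpt-4o-mini",
    messages = new object[]
    {
        new { role = "system", content = "You are a helpful assistant." },
        new { role = "user", content = "Hello!" }
    },
    // Upper bound on generated tokens, including any reasoning tokens (see docs above).
    max_completion_tokens = 256,
    // Opt in to storing the completion for distillation/evals and tag it for dashboard filtering.
    store = true,
    metadata = new { purpose = "sdk-snapshot-example" }
});

using var response = await http.PostAsync(
    "chat/completions", new StringContent(body, Encoding.UTF8, "application/json"));
Console.WriteLine(await response.Content.ReadAsStringAsync());
```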
/// Default Value: 1
/// Example: 1 /// + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + /// + /// + /// Configuration for a [Predicted Output](/docs/guides/predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + /// + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
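The new `modalities`, `audio`, and `prediction` parameters above shape the request body as sketched below. The `audio` sub-fields (`voice`, `format`) and the `prediction` shape (`type: "content"`) are assumptions based on the linked audio and Predicted Outputs guides, not on this diff; only the payloads are built, no request is sent.

```csharp
using System;
using System.Text.Json;

// Ask an audio-capable model for both text and audio output (see modalities docs above).
var audioRequest = new
{
    model = "gpt-4o-audio-preview",
    modalities = new[] { "text", "audio" },
    audio = new { voice = "alloy", format = "wav" }, // assumed audio output parameters
    messages = new object[] { new { role = "user", content = "Say hello." } }
};

// Predicted Outputs: supply the mostly-unchanged content up front so the model
// can avoid regenerating it (assumed "content"-type prediction shape).
var predictedRequest = new
{
    model = "gpt-4o-mini",
    messages = new object[] { new { role = "user", content = "Rename the class Foo to Bar." } },
    prediction = new { type = "content", content = "public class Foo { /* ... */ }" }
};

var options = new JsonSerializerOptions { WriteIndented = true };
Console.WriteLine(JsonSerializer.Serialize(audioRequest, options));
Console.WriteLine(JsonSerializer.Serialize(predictedRequest, options));
```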
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// @@ -68,10 +102,12 @@ public partial interface IChatClient /// /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto /// /// /// Up to 4 sequences where the API will stop generating further tokens. @@ -107,10 +143,11 @@ public partial interface IChatClient /// `none` is the default when no tools are present. `auto` is the default if tools are present. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with @@ -118,12 +155,17 @@ public partial interface IChatClient global::System.Threading.Tasks.Task CreateChatCompletionAsync( global::System.Collections.Generic.IList messages, global::G.AnyOf model, + bool? store = default, + global::System.Collections.Generic.Dictionary? metadata = default, double? frequencyPenalty = default, global::System.Collections.Generic.Dictionary? logitBias = default, bool? logprobs = default, int? topLogprobs = default, - int? maxTokens = default, + int? maxCompletionTokens = default, int? n = default, + global::System.Collections.Generic.IList? modalities = default, + global::G.PredictionContent? prediction = default, + global::G.CreateChatCompletionRequestAudio? audio = default, double? presencePenalty = default, global::G.ResponseFormat? responseFormat = default, int? seed = default, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ICompletionsClient.CreateCompletion.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ICompletionsClient.CreateCompletion.g.verified.cs index 37272bff75..c732eab3a6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ICompletionsClient.CreateCompletion.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ICompletionsClient.CreateCompletion.g.verified.cs @@ -19,7 +19,7 @@ public partial interface ICompletionsClient /// Creates a completion for the provided prompt and parameters. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
@@ -38,7 +38,7 @@ public partial interface ICompletionsClient /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -64,7 +64,7 @@ public partial interface ICompletionsClient /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -99,7 +99,7 @@ public partial interface ICompletionsClient /// Example: 1 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IEmbeddingsClient.CreateEmbedding.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IEmbeddingsClient.CreateEmbedding.g.verified.cs index 28ba9a7375..b2bb84e27d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IEmbeddingsClient.CreateEmbedding.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IEmbeddingsClient.CreateEmbedding.g.verified.cs @@ -23,7 +23,7 @@ public partial interface IEmbeddingsClient /// Example: The quick brown fox jumped over the lazy dog /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small /// /// @@ -35,7 +35,7 @@ public partial interface IEmbeddingsClient /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
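A small sketch of an embeddings payload using the `dimensions` parameter noted above (supported by `text-embedding-3` and later models); it only builds the JSON body rather than calling the generated `IEmbeddingsClient`.

```csharp
using System;
using System.Text.Json;

var embeddingRequest = new
{
    model = "text-embedding-3-small",
    input = "The quick brown fox jumped over the lazy dog",
    // Request shorter vectors; only text-embedding-3 and later models support this.
    dimensions = 256
};

Console.WriteLine(JsonSerializer.Serialize(embeddingRequest));
```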
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFilesClient.CreateFile.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFilesClient.CreateFile.g.verified.cs index 5a05982dd5..1d164a64ab 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFilesClient.CreateFile.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFilesClient.CreateFile.g.verified.cs @@ -9,7 +9,7 @@ public partial interface IFilesClient /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.
- /// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
+ /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// @@ -23,7 +23,7 @@ public partial interface IFilesClient /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
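The Batch limits above (a `.jsonl` file uploaded with purpose `batch`, now up to 200 MB / 50,000 requests) mean one JSON request object per line. A minimal sketch of producing such a file; the `custom_id`/`method`/`url`/`body` layout follows the batch request-input format linked above, and the model/prompt values are placeholders.

```csharp
using System;
using System.IO;
using System.Linq;
using System.Text.Json;

// One self-contained JSON request object per line, as the batch input format requires.
var lines = Enumerable.Range(1, 3).Select(i => JsonSerializer.Serialize(new
{
    custom_id = $"request-{i}",
    method = "POST",
    url = "/v1/chat/completions",
    body = new
    {
        model = "gpt-4o-mini",
        messages = new object[] { new { role = "user", content = $"Say hello #{i}." } }
    }
}));

// Upload the resulting file with purpose "batch"; it may hold up to 50,000
// requests and be up to 200 MB, per the limits documented above.
File.WriteAllLines("batch-input.jsonl", lines);
Console.WriteLine($"Wrote {new FileInfo("batch-input.jsonl").Length} bytes");
```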
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.
- /// The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
+ /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFilesClient.ListFiles.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFilesClient.ListFiles.g.verified.cs index cc2eab9d69..e2c309e8e6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFilesClient.ListFiles.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFilesClient.ListFiles.g.verified.cs @@ -6,13 +6,23 @@ namespace G public partial interface IFilesClient { /// - /// Returns a list of files that belong to the user's organization. + /// Returns a list of files. /// /// + /// + /// Default Value: 10000 + /// + /// + /// Default Value: desc + /// + /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ListFilesAsync( string? purpose = default, + int? limit = default, + global::G.ListFilesOrder? order = default, + string? after = default, global::System.Threading.CancellationToken cancellationToken = default); } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFineTuningClient.CreateFineTuningJob.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFineTuningClient.CreateFineTuningJob.g.verified.cs index f8a8b50942..5abc7ac681 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFineTuningClient.CreateFineTuningJob.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IFineTuningClient.CreateFineTuningJob.g.verified.cs @@ -24,7 +24,7 @@ public partial interface IFineTuningClient /// /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini /// /// @@ -39,7 +39,7 @@ public partial interface IFineTuningClient /// The hyperparameters used for the fine-tuning job. /// /// - /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImage.g.verified.cs index 1b37a636ca..1e547983ae 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImage.g.verified.cs @@ -53,7 +53,7 @@ public partial interface IImagesClient /// Example: vivid /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
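The `suffix` limit above grows from 18 to 64 characters. A hedged sketch of a fine-tuning job request that uses it; the `/v1/fine_tuning/jobs` path and snake_case fields (`training_file`, `suffix`) are assumptions consistent with the fine-tuning API reference, and `file-abc123` is a placeholder file ID.

```csharp
using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Text.Json;

using var http = new HttpClient { BaseAddress = new Uri("https://api.openai.com/v1/") };
http.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue(
    "Bearer", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

var body = JsonSerializer.Serialize(new
{
    model = "gpt-4o-mini",
    training_file = "file-abc123",   // placeholder ID of an uploaded .jsonl training file
    suffix = "custom-model-name"     // up to 64 characters, per the docs above
});

// Produces a fine-tuned model name like ft:gpt-4o-mini:openai:custom-model-name:...
using var response = await http.PostAsync(
    "fine_tuning/jobs", new StringContent(body, Encoding.UTF8, "application/json"));
Console.WriteLine(await response.Content.ReadAsStringAsync());
```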
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImageEdit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImageEdit.g.verified.cs index 3085e2750a..a483f69261 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImageEdit.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImageEdit.g.verified.cs @@ -55,7 +55,7 @@ public partial interface IImagesClient /// Example: url /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImageVariation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImageVariation.g.verified.cs index 03b886e52f..cfd7a6351b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImageVariation.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IImagesClient.CreateImageVariation.g.verified.cs @@ -45,7 +45,7 @@ public partial interface IImagesClient /// Example: 1024x1024 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModelsClient.RetrieveModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModelsClient.RetrieveModel.g.verified.cs index ca76ad57bf..67c75ae00b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModelsClient.RetrieveModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModelsClient.RetrieveModel.g.verified.cs @@ -13,7 +13,7 @@ public partial interface IModelsClient /// /// The token to cancel the operation with /// - global::System.Threading.Tasks.Task RetrieveModelAsync( + global::System.Threading.Tasks.Task RetrieveModelAsync( string model, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModerationsClient.CreateModeration.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModerationsClient.CreateModeration.g.verified.cs index 295dbd10f4..764d6718ec 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModerationsClient.CreateModeration.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModerationsClient.CreateModeration.g.verified.cs @@ -6,7 +6,8 @@ namespace G public partial interface IModerationsClient { /// - /// Classifies if text is potentially harmful. + /// Classifies if text and/or image inputs are potentially harmful. Learn
+ /// more in the [moderation guide](/docs/guides/moderation). ///
/// /// The token to cancel the operation with @@ -16,21 +17,24 @@ public partial interface IModerationsClient global::System.Threading.CancellationToken cancellationToken = default); /// - /// Classifies if text is potentially harmful. + /// Classifies if text and/or image inputs are potentially harmful. Learn
+ /// more in the [moderation guide](/docs/guides/moderation). ///
/// - /// The input text to classify + /// Input (or inputs) to classify. Can be a single string, an array of strings, or
+ /// an array of multi-modal input objects similar to other models. /// /// - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
- /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- /// Default Value: text-moderation-latest
- /// Example: text-moderation-stable + /// The content moderation model you would like to use. Learn more in
+ /// [the moderation guide](/docs/guides/moderation), and learn about
+ /// available models [here](/docs/models#moderation).
+ /// Default Value: omni-moderation-latest
+ /// Example: omni-moderation-2024-09-26 /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task CreateModerationAsync( - global::G.OneOf> input, + global::G.OneOf, global::System.Collections.Generic.IList> input, global::G.AnyOf? model = default, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModerationsClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModerationsClient.g.verified.cs index 9708fd6a71..fc001e21b1 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModerationsClient.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IModerationsClient.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Given a input text, outputs if the model classifies it as potentially harmful.
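The moderation hunks above switch the default model to `omni-moderation-latest` and widen `input` to a string, an array of strings, or an array of multi-modal objects. A sketch of such a payload; the item shapes (`type: "text"`, `type: "image_url"`) are assumptions based on the moderation guide rather than this diff, and the image URL is a placeholder.

```csharp
using System;
using System.Text.Json;

var moderationRequest = new
{
    model = "omni-moderation-latest",
    // Mixed text + image input, matching the widened `input` type documented above.
    input = new object[]
    {
        new { type = "text", text = "Is this content safe?" },
        new { type = "image_url", image_url = new { url = "https://example.com/image.png" } }
    }
};

Console.WriteLine(JsonSerializer.Serialize(moderationRequest, new JsonSerializerOptions { WriteIndented = true }));
```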
+ /// Given text and/or image inputs, classifies if those inputs are potentially harmful.
/// If no httpClient is provided, a new one will be created.
/// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. ///
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IOpenAiClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IOpenAiClient.g.verified.cs index b7b7053694..7e2e5ac9b8 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IOpenAiClient.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IOpenAiClient.g.verified.cs @@ -94,7 +94,7 @@ public partial interface IOpenAiClient : global::System.IDisposable public ModelsClient Models { get; } /// - /// Given a input text, outputs if the model classifies it as potentially harmful. + /// Given text and/or image inputs, classifies if those inputs are potentially harmful. /// public ModerationsClient Moderations { get; } @@ -106,13 +106,18 @@ public partial interface IOpenAiClient : global::System.IDisposable /// /// /// - public VectorStoresClient VectorStores { get; } + public UsageClient Usage { get; } /// /// /// public InvitesClient Invites { get; } + /// + /// + /// + public ProjectsClient Projects { get; } + /// /// /// @@ -121,7 +126,7 @@ public partial interface IOpenAiClient : global::System.IDisposable /// /// /// - public ProjectsClient Projects { get; } + public VectorStoresClient VectorStores { get; } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ListProjectRateLimits.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ListProjectRateLimits.g.verified.cs new file mode 100644 index 0000000000..bfdce37006 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ListProjectRateLimits.g.verified.cs @@ -0,0 +1,26 @@ +//HintName: G.IProjectsClient.ListProjectRateLimits.g.cs +#nullable enable + +namespace G +{ + public partial interface IProjectsClient + { + /// + /// Returns the rate limits per model for a project. + /// + /// + /// + /// Default Value: 100 + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task ListProjectRateLimitsAsync( + string projectId, + int? limit = default, + string? after = default, + string? before = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ModifyProject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ModifyProject.g.verified.cs index 6004bae4ce..618fbedc84 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ModifyProject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ModifyProject.g.verified.cs @@ -8,22 +8,26 @@ public partial interface IProjectsClient /// /// Modifies a project in the organization. /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, global::G.ProjectUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a project in the organization. /// + /// /// /// The updated name of the project, this name appears in reports. 
/// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, string name, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ModifyProjectUser.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ModifyProjectUser.g.verified.cs index b0a947b701..a23110ac65 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ModifyProjectUser.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.ModifyProjectUser.g.verified.cs @@ -8,22 +8,30 @@ public partial interface IProjectsClient /// /// Modifies a user's role in the project. /// + /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::G.ProjectUserUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a user's role in the project. /// + /// + /// /// /// `owner` or `member` /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::G.ProjectUserUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.UpdateProjectRateLimits.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.UpdateProjectRateLimits.g.verified.cs new file mode 100644 index 0000000000..347e85901d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IProjectsClient.UpdateProjectRateLimits.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.IProjectsClient.UpdateProjectRateLimits.g.cs +#nullable enable + +namespace G +{ + public partial interface IProjectsClient + { + /// + /// Updates a project rate limit. + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UpdateProjectRateLimitsAsync( + string projectId, + string rateLimitId, + global::G.ProjectRateLimitUpdateRequest request, + global::System.Threading.CancellationToken cancellationToken = default); + + /// + /// Updates a project rate limit. + /// + /// + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only relevant for certain models. + /// + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + /// + /// The maximum requests per day. Only relevant for certain models. + /// + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UpdateProjectRateLimitsAsync( + string projectId, + string rateLimitId, + int? maxRequestsPer1Minute = default, + int? maxTokensPer1Minute = default, + int? maxImagesPer1Minute = default, + int? maxAudioMegabytesPer1Minute = default, + int? maxRequestsPer1Day = default, + int? 
batch1DayMaxInputTokens = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUploadsClient.CreateUpload.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUploadsClient.CreateUpload.g.verified.cs index 127b8ce96a..40bea117be 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUploadsClient.CreateUpload.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUploadsClient.CreateUpload.g.verified.cs @@ -9,7 +9,7 @@ public partial interface IUploadsClient /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// @@ -23,7 +23,7 @@ public partial interface IUploadsClient /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageAudioSpeeches.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageAudioSpeeches.g.verified.cs new file mode 100644 index 0000000000..c171fdadcc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageAudioSpeeches.g.verified.cs @@ -0,0 +1,38 @@ +//HintName: G.IUsageClient.UsageAudioSpeeches.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get audio speeches usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageAudioSpeechesAsync( + int startTime, + int? endTime = default, + global::G.UsageAudioSpeechesBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageAudioTranscriptions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageAudioTranscriptions.g.verified.cs new file mode 100644 index 0000000000..2626dd8499 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageAudioTranscriptions.g.verified.cs @@ -0,0 +1,38 @@ +//HintName: G.IUsageClient.UsageAudioTranscriptions.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get audio transcriptions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageAudioTranscriptionsAsync( + int startTime, + int? endTime = default, + global::G.UsageAudioTranscriptionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCodeInterpreterSessions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCodeInterpreterSessions.g.verified.cs new file mode 100644 index 0000000000..c6aaf40e5d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCodeInterpreterSessions.g.verified.cs @@ -0,0 +1,32 @@ +//HintName: G.IUsageClient.UsageCodeInterpreterSessions.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get code interpreter sessions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageCodeInterpreterSessionsAsync( + int startTime, + int? endTime = default, + global::G.UsageCodeInterpreterSessionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCompletions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCompletions.g.verified.cs new file mode 100644 index 0000000000..547acd02b9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCompletions.g.verified.cs @@ -0,0 +1,40 @@ +//HintName: G.IUsageClient.UsageCompletions.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get completions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageCompletionsAsync( + int startTime, + int? endTime = default, + global::G.UsageCompletionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + bool? batch = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCosts.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCosts.g.verified.cs new file mode 100644 index 0000000000..96ebd4b506 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageCosts.g.verified.cs @@ -0,0 +1,34 @@ +//HintName: G.IUsageClient.UsageCosts.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get costs details for the organization. 
+ /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// Default Value: 7 + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageCostsAsync( + int startTime, + int? endTime = default, + global::G.UsageCostsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageEmbeddings.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageEmbeddings.g.verified.cs new file mode 100644 index 0000000000..4445eddb06 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageEmbeddings.g.verified.cs @@ -0,0 +1,38 @@ +//HintName: G.IUsageClient.UsageEmbeddings.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get embeddings usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageEmbeddingsAsync( + int startTime, + int? endTime = default, + global::G.UsageEmbeddingsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageImages.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageImages.g.verified.cs new file mode 100644 index 0000000000..687af2a335 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageImages.g.verified.cs @@ -0,0 +1,42 @@ +//HintName: G.IUsageClient.UsageImages.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get images usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageImagesAsync( + int startTime, + int? endTime = default, + global::G.UsageImagesBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? sources = default, + global::System.Collections.Generic.IList? sizes = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageModerations.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageModerations.g.verified.cs new file mode 100644 index 0000000000..f8afc70d6a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageModerations.g.verified.cs @@ -0,0 +1,38 @@ +//HintName: G.IUsageClient.UsageModerations.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get moderations usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageModerationsAsync( + int startTime, + int? endTime = default, + global::G.UsageModerationsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageVectorStores.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageVectorStores.g.verified.cs new file mode 100644 index 0000000000..0bd84589db --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.UsageVectorStores.g.verified.cs @@ -0,0 +1,32 @@ +//HintName: G.IUsageClient.UsageVectorStores.g.cs +#nullable enable + +namespace G +{ + public partial interface IUsageClient + { + /// + /// Get vector stores usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + global::System.Threading.Tasks.Task UsageVectorStoresAsync( + int startTime, + int? endTime = default, + global::G.UsageVectorStoresBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.g.verified.cs new file mode 100644 index 0000000000..8605ac04ee --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsageClient.g.verified.cs @@ -0,0 +1,41 @@ +//HintName: G.IUsageClient.g.cs + +#nullable enable + +namespace G +{ + /// + /// If no httpClient is provided, a new one will be created.
+ /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + ///
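// Sketch of how a consumer might call the new usage endpoints through this interface.
// The concrete client type name ("UsageClient") and the string element type of the
// list filters are assumptions; only the method and parameter names come from the
// generated signatures above.
static async System.Threading.Tasks.Task QueryCompletionsUsageAsync(G.IUsageClient usage)
{
    // Unix timestamp (seconds) for the start of the reporting window.
    int since = (int)System.DateTimeOffset.UtcNow.AddDays(-7).ToUnixTimeSeconds();

    // Last week of completions usage, grouped by model, first page only.
    await usage.UsageCompletionsAsync(
        startTime: since,
        groupBy: new[] { "model" },   // element type assumed to be string
        limit: 7);
}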
+ public partial interface IUsageClient : global::System.IDisposable + { + /// + /// The HttpClient instance. + /// + public global::System.Net.Http.HttpClient HttpClient { get; } + + /// + /// The base URL for the API. + /// + public System.Uri? BaseUri { get; } + + /// + /// The authorizations to use for the requests. + /// + public global::System.Collections.Generic.List Authorizations { get; } + + /// + /// Gets or sets a value indicating whether the response content should be read as a string. + /// True by default in debug builds, false otherwise. + /// + public bool ReadResponseAsString { get; set; } + + /// + /// + /// + global::System.Text.Json.Serialization.JsonSerializerContext JsonSerializerContext { get; set; } + + + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsersClient.ModifyUser.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsersClient.ModifyUser.g.verified.cs index 9052ca9347..fa2bdf58e5 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsersClient.ModifyUser.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IUsersClient.ModifyUser.g.verified.cs @@ -8,22 +8,26 @@ public partial interface IUsersClient /// /// Modifies a user's role in the organization. /// + /// /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::G.UserRoleUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default); /// /// Modifies a user's role in the organization. /// + /// /// /// `owner` or `reader` /// /// The token to cancel the operation with /// global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::G.UserRoleUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IVectorStoresClient.CreateVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IVectorStoresClient.CreateVectorStore.g.verified.cs index dcec61501d..251b16739b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IVectorStoresClient.CreateVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IVectorStoresClient.CreateVectorStore.g.verified.cs @@ -32,7 +32,7 @@ public partial interface IVectorStoresClient /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IVectorStoresClient.ModifyVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IVectorStoresClient.ModifyVectorStore.g.verified.cs index 05b3909aab..289afdd13c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IVectorStoresClient.ModifyVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.IVectorStoresClient.ModifyVectorStore.g.verified.cs @@ -29,7 +29,7 @@ public partial interface IVectorStoresClient /// The expiration policy for a vector store. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImage.g.verified.cs index 68da9862d1..e9748caba3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImage.g.verified.cs @@ -196,7 +196,7 @@ partial void ProcessCreateImageResponseContent( /// Example: vivid /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImageEdit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImageEdit.g.verified.cs index 75252aa751..589f8b8ba6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImageEdit.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImageEdit.g.verified.cs @@ -238,7 +238,7 @@ partial void ProcessCreateImageEditResponseContent( /// Example: url /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImageVariation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImageVariation.g.verified.cs index b2be7f42c6..4f1c3f9ec1 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImageVariation.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ImagesClient.CreateImageVariation.g.verified.cs @@ -218,7 +218,7 @@ partial void ProcessCreateImageVariationResponseContent( /// Example: 1024x1024 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// /// The token to cancel the operation with diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObject.g.verified.cs index ebb9f543b5..3723566109 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObject.g.verified.cs @@ -46,7 +46,7 @@ public sealed partial class AssistantObject public required string? Description { get; set; } /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::System.Text.Json.Serialization.JsonPropertyName("model")] [global::System.Text.Json.Serialization.JsonRequired] @@ -60,7 +60,8 @@ public sealed partial class AssistantObject public required string? Instructions { get; set; } /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: []
///
[global::System.Text.Json.Serialization.JsonPropertyName("tools")] [global::System.Text.Json.Serialization.JsonRequired] @@ -73,7 +74,7 @@ public sealed partial class AssistantObject public global::G.AssistantObjectToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] [global::System.Text.Json.Serialization.JsonRequired] @@ -99,9 +100,9 @@ public sealed partial class AssistantObject public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
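// The payload shapes described above, serialized with System.Text.Json purely to show
// the wire format; the schema name and property below are invented for illustration
// and are not part of the generated snapshot.
using System.Text.Json;

string jsonMode = JsonSerializer.Serialize(new { type = "json_object" });
// => {"type":"json_object"}

string structuredOutputs = JsonSerializer.Serialize(new
{
    type = "json_schema",
    json_schema = new
    {
        name = "math_answer",   // illustrative schema name
        strict = true,
        schema = new { type = "object", properties = new { answer = new { type = "string" } } }
    }
});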
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] @@ -133,19 +134,20 @@ public sealed partial class AssistantObject /// The description of the assistant. The maximum length is 512 characters. /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
@@ -159,9 +161,9 @@ public sealed partial class AssistantObject /// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObjectMetadata.g.verified.cs index 9cada39bd4..1b100cd670 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class AssistantObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObjectToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObjectToolResourcesCodeInterpreter.g.verified.cs index 0099cb4783..31604eeace 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObjectToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantObjectToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class AssistantObjectToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: []
///
[global::System.Text.Json.Serialization.JsonPropertyName("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class AssistantObjectToolResourcesCodeInterpreter /// Initializes a new instance of the class. /// /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public AssistantObjectToolResourcesCodeInterpreter( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEvent.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEvent.g.verified.cs index 4a3f9be760..3caffd0c6d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEvent.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEvent.g.verified.cs @@ -30,7 +30,7 @@ namespace G public global::G.AssistantStreamEventDiscriminatorEvent? Event { get; } /// - /// Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + /// Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout. /// #if NET6_0_OR_GREATER public global::G.ErrorEvent? Error { get; init; } @@ -485,7 +485,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant13? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant14? ThreadRunStepCreated { get; init; } @@ -520,7 +520,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant14? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant15? ThreadRunStepInProgress { get; init; } @@ -555,7 +555,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant15? value) } /// - /// Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + /// Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant16? ThreadRunStepDelta { get; init; } @@ -590,7 +590,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant16? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant17? ThreadRunStepCompleted { get; init; } @@ -625,7 +625,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant17? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant18? ThreadRunStepFailed { get; init; } @@ -660,7 +660,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant18? value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant19? ThreadRunStepCancelled { get; init; } @@ -695,7 +695,7 @@ public AssistantStreamEvent(global::G.AssistantStreamEventVariant19? 
value) } /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) expires. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. /// #if NET6_0_OR_GREATER public global::G.AssistantStreamEventVariant20? ThreadRunStepExpired { get; init; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant14.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant14.g.verified.cs index d001128bbf..e9bcd3f3e4 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant14.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant14.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created. /// public sealed partial class AssistantStreamEventVariant14 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant15.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant15.g.verified.cs index 0555d29578..1170318055 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant15.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant15.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. /// public sealed partial class AssistantStreamEventVariant15 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant16.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant16.g.verified.cs index 274200aa3a..5d6801e50c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant16.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant16.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + /// Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed. /// public sealed partial class AssistantStreamEventVariant16 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant17.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant17.g.verified.cs index 6044be11d2..568bc3de9a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant17.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant17.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed. 
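// A minimal sketch of dispatching on the AssistantStreamEvent union via the nullable
// variant properties shown in this snapshot (Error, ThreadRunStepCreated,
// ThreadRunStepCompleted, ...); only a few branches are illustrated.
static void HandleStreamEvent(G.AssistantStreamEvent streamEvent)
{
    if (streamEvent.Error is not null)
    {
        // an error event: internal server error or timeout
    }
    else if (streamEvent.ThreadRunStepCompleted is not null)
    {
        // a run step finished
    }
    else if (streamEvent.ThreadRunStepFailed is not null)
    {
        // a run step failed
    }
}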
/// public sealed partial class AssistantStreamEventVariant17 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant18.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant18.g.verified.cs index 59c7191218..eea48e2902 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant18.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant18.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails. /// public sealed partial class AssistantStreamEventVariant18 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant19.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant19.g.verified.cs index 26df32d64e..210a003da3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant19.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant19.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled. /// public sealed partial class AssistantStreamEventVariant19 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant20.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant20.g.verified.cs index 6d52ecf812..be2cecbf58 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant20.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant20.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when a [run step](/docs/api-reference/runs/step-object) expires. + /// Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires. /// public sealed partial class AssistantStreamEventVariant20 { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant3.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant3.g.verified.cs index 8fbf0cb9ed..84c6cb4ec4 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant3.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantStreamEventVariant3.g.verified.cs @@ -9,6 +9,12 @@ namespace G /// public sealed partial class AssistantStreamEventVariant3 { + /// + /// Whether to enable input audio transcription. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("enabled")] + public bool? Enabled { get; set; } + /// /// /// @@ -32,6 +38,9 @@ public sealed partial class AssistantStreamEventVariant3 /// /// Initializes a new instance of the class. /// + /// + /// Whether to enable input audio transcription. + /// /// /// /// Represents a thread that contains [messages](/docs/api-reference/messages). 
@@ -39,9 +48,11 @@ public sealed partial class AssistantStreamEventVariant3 [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public AssistantStreamEventVariant3( global::G.ThreadObject data, + bool? enabled, global::G.AssistantStreamEventVariant3Event @event) { this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.Enabled = enabled; this.Event = @event; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantToolsFileSearchFileSearch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantToolsFileSearchFileSearch.g.verified.cs index 3e755430a5..b1c5f62dd8 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantToolsFileSearchFileSearch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantToolsFileSearchFileSearch.g.verified.cs @@ -11,11 +11,18 @@ public sealed partial class AssistantToolsFileSearchFileSearch { /// /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.
- /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. ///
[global::System.Text.Json.Serialization.JsonPropertyName("max_num_results")] public int? MaxNumResults { get; set; } + /// + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
+ /// See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + ///
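// Illustrative wire shape for a file_search tool that overrides the defaults described
// above. The nested property names (ranker, score_threshold) follow the public
// Assistants API documentation rather than this generated model, so treat them as
// assumptions.
using System.Text.Json;

string fileSearchTool = JsonSerializer.Serialize(new
{
    type = "file_search",
    file_search = new
    {
        max_num_results = 10,
        ranking_options = new { ranker = "auto", score_threshold = 0.5 }
    }
});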
+ [global::System.Text.Json.Serialization.JsonPropertyName("ranking_options")] + public global::G.FileSearchRankingOptions? RankingOptions { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -27,13 +34,19 @@ public sealed partial class AssistantToolsFileSearchFileSearch /// /// /// The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.
- /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information. + /// Note that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + /// + /// + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
+ /// See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public AssistantToolsFileSearchFileSearch( - int? maxNumResults) + int? maxNumResults, + global::G.FileSearchRankingOptions? rankingOptions) { this.MaxNumResults = maxNumResults; + this.RankingOptions = rankingOptions; } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantsApiResponseFormatOption.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantsApiResponseFormatOption.g.verified.cs index 6e03692459..573a4c7ee3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantsApiResponseFormatOption.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AssistantsApiResponseFormatOption.g.verified.cs @@ -6,9 +6,9 @@ namespace G { /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
public readonly partial struct AssistantsApiResponseFormatOption : global::System.IEquatable diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranscriptionRequestResponseFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AudioResponseFormat.g.verified.cs similarity index 50% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranscriptionRequestResponseFormat.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AudioResponseFormat.g.verified.cs index 7cbb807492..daedac40f1 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranscriptionRequestResponseFormat.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AudioResponseFormat.g.verified.cs @@ -1,14 +1,14 @@ -//HintName: G.Models.CreateTranscriptionRequestResponseFormat.g.cs +//HintName: G.Models.AudioResponseFormat.g.cs #nullable enable namespace G { /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json
///
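// Round-tripping the wire value with the helpers defined just below; this uses only
// the ToValueString/ToEnum methods shown in the renamed AudioResponseFormat snapshot.
var format = G.AudioResponseFormat.VerboseJson;
string wireValue = G.AudioResponseFormatExtensions.ToValueString(format);      // "verbose_json"
G.AudioResponseFormat? parsed = G.AudioResponseFormatExtensions.ToEnum("vtt"); // AudioResponseFormat.Vtt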
- public enum CreateTranscriptionRequestResponseFormat + public enum AudioResponseFormat { /// /// `json`, `text`, `srt`, `verbose_json`, or `vtt`. @@ -35,35 +35,35 @@ public enum CreateTranscriptionRequestResponseFormat /// /// Enum extensions to do fast conversions without the reflection. /// - public static class CreateTranscriptionRequestResponseFormatExtensions + public static class AudioResponseFormatExtensions { /// /// Converts an enum to a string. /// - public static string ToValueString(this CreateTranscriptionRequestResponseFormat value) + public static string ToValueString(this AudioResponseFormat value) { return value switch { - CreateTranscriptionRequestResponseFormat.Json => "json", - CreateTranscriptionRequestResponseFormat.Text => "text", - CreateTranscriptionRequestResponseFormat.Srt => "srt", - CreateTranscriptionRequestResponseFormat.VerboseJson => "verbose_json", - CreateTranscriptionRequestResponseFormat.Vtt => "vtt", + AudioResponseFormat.Json => "json", + AudioResponseFormat.Text => "text", + AudioResponseFormat.Srt => "srt", + AudioResponseFormat.VerboseJson => "verbose_json", + AudioResponseFormat.Vtt => "vtt", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } /// /// Converts an string to a enum. /// - public static CreateTranscriptionRequestResponseFormat? ToEnum(string value) + public static AudioResponseFormat? ToEnum(string value) { return value switch { - "json" => CreateTranscriptionRequestResponseFormat.Json, - "text" => CreateTranscriptionRequestResponseFormat.Text, - "srt" => CreateTranscriptionRequestResponseFormat.Srt, - "verbose_json" => CreateTranscriptionRequestResponseFormat.VerboseJson, - "vtt" => CreateTranscriptionRequestResponseFormat.Vtt, + "json" => AudioResponseFormat.Json, + "text" => AudioResponseFormat.Text, + "srt" => AudioResponseFormat.Srt, + "verbose_json" => AudioResponseFormat.VerboseJson, + "vtt" => AudioResponseFormat.Vtt, _ => null, }; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLog.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLog.g.verified.cs index c81a71278d..7606ce5962 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLog.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLog.g.verified.cs @@ -117,6 +117,18 @@ public sealed partial class AuditLog [global::System.Text.Json.Serialization.JsonPropertyName("project.archived")] public global::G.AuditLogProjectArchived? ProjectArchived { get; set; } + /// + /// The details for events with this `type`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("rate_limit.updated")] + public global::G.AuditLogRateLimitUpdated? RateLimitUpdated { get; set; } + + /// + /// The details for events with this `type`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("rate_limit.deleted")] + public global::G.AuditLogRateLimitDeleted? RateLimitDeleted { get; set; } + /// /// The details for events with this `type`. /// @@ -213,6 +225,12 @@ public sealed partial class AuditLog /// /// The details for events with this `type`. /// + /// + /// The details for events with this `type`. + /// + /// + /// The details for events with this `type`. + /// /// /// The details for events with this `type`. /// @@ -250,6 +268,8 @@ public AuditLog( global::G.AuditLogProjectCreated? projectCreated, global::G.AuditLogProjectUpdated? 
projectUpdated, global::G.AuditLogProjectArchived? projectArchived, + global::G.AuditLogRateLimitUpdated? rateLimitUpdated, + global::G.AuditLogRateLimitDeleted? rateLimitDeleted, global::G.AuditLogServiceAccountCreated? serviceAccountCreated, global::G.AuditLogServiceAccountUpdated? serviceAccountUpdated, global::G.AuditLogServiceAccountDeleted? serviceAccountDeleted, @@ -274,6 +294,8 @@ public AuditLog( this.ProjectCreated = projectCreated; this.ProjectUpdated = projectUpdated; this.ProjectArchived = projectArchived; + this.RateLimitUpdated = rateLimitUpdated; + this.RateLimitDeleted = rateLimitDeleted; this.ServiceAccountCreated = serviceAccountCreated; this.ServiceAccountUpdated = serviceAccountUpdated; this.ServiceAccountDeleted = serviceAccountDeleted; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogEventType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogEventType.g.verified.cs index f74028c485..641c13d22e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogEventType.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogEventType.g.verified.cs @@ -80,6 +80,14 @@ public enum AuditLogEventType /// /// /// + RateLimitUpdated, + /// + /// + /// + RateLimitDeleted, + /// + /// + /// UserAdded, /// /// @@ -120,6 +128,8 @@ public static string ToValueString(this AuditLogEventType value) AuditLogEventType.ServiceAccountCreated => "service_account.created", AuditLogEventType.ServiceAccountUpdated => "service_account.updated", AuditLogEventType.ServiceAccountDeleted => "service_account.deleted", + AuditLogEventType.RateLimitUpdated => "rate_limit.updated", + AuditLogEventType.RateLimitDeleted => "rate_limit.deleted", AuditLogEventType.UserAdded => "user.added", AuditLogEventType.UserUpdated => "user.updated", AuditLogEventType.UserDeleted => "user.deleted", @@ -150,6 +160,8 @@ public static string ToValueString(this AuditLogEventType value) "service_account.created" => AuditLogEventType.ServiceAccountCreated, "service_account.updated" => AuditLogEventType.ServiceAccountUpdated, "service_account.deleted" => AuditLogEventType.ServiceAccountDeleted, + "rate_limit.updated" => AuditLogEventType.RateLimitUpdated, + "rate_limit.deleted" => AuditLogEventType.RateLimitDeleted, "user.added" => AuditLogEventType.UserAdded, "user.updated" => AuditLogEventType.UserUpdated, "user.deleted" => AuditLogEventType.UserDeleted, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitDeleted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitDeleted.Json.g.verified.cs new file mode 100644 index 0000000000..fa8ca64b9b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitDeleted.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.AuditLogRateLimitDeleted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class AuditLogRateLimitDeleted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
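// A small sketch of round-tripping the new audit-log payloads with the generated
// helpers shown here; the example ID is invented. The JsonSerializerContext overloads
// are the trimming/AOT-friendly path, while the options-based overloads used below
// rely on reflection.
var deleted = new G.AuditLogRateLimitDeleted(id: "rl_example");     // invented example ID
string json = deleted.ToJson();                                     // reflection-based overload
G.AuditLogRateLimitDeleted? roundTripped = G.AuditLogRateLimitDeleted.FromJson(json);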
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.AuditLogRateLimitDeleted? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.AuditLogRateLimitDeleted), + jsonSerializerContext) as global::G.AuditLogRateLimitDeleted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.AuditLogRateLimitDeleted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.AuditLogRateLimitDeleted), + jsonSerializerContext).ConfigureAwait(false)) as global::G.AuditLogRateLimitDeleted; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitDeleted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitDeleted.g.verified.cs new file mode 100644 index 0000000000..ef08505ccd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitDeleted.g.verified.cs @@ -0,0 +1,44 @@ +//HintName: G.Models.AuditLogRateLimitDeleted.g.cs + +#nullable enable + +namespace G +{ + /// + /// The details for events with this `type`. + /// + public sealed partial class AuditLogRateLimitDeleted + { + /// + /// The rate limit ID + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The rate limit ID + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public AuditLogRateLimitDeleted( + string? id) + { + this.Id = id; + } + + /// + /// Initializes a new instance of the class. + /// + public AuditLogRateLimitDeleted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdated.Json.g.verified.cs new file mode 100644 index 0000000000..9c0d7d9b91 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdated.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.AuditLogRateLimitUpdated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class AuditLogRateLimitUpdated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.AuditLogRateLimitUpdated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.AuditLogRateLimitUpdated), + jsonSerializerContext) as global::G.AuditLogRateLimitUpdated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.AuditLogRateLimitUpdated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.AuditLogRateLimitUpdated), + jsonSerializerContext).ConfigureAwait(false)) as global::G.AuditLogRateLimitUpdated; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdated.g.verified.cs new file mode 100644 index 0000000000..a97136dadb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdated.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.AuditLogRateLimitUpdated.g.cs + +#nullable enable + +namespace G +{ + /// + /// The details for events with this `type`. + /// + public sealed partial class AuditLogRateLimitUpdated + { + /// + /// The rate limit ID + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The payload used to update the rate limits. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("changes_requested")] + public global::G.AuditLogRateLimitUpdatedChangesRequested? ChangesRequested { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The rate limit ID + /// + /// + /// The payload used to update the rate limits. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public AuditLogRateLimitUpdated( + string? id, + global::G.AuditLogRateLimitUpdatedChangesRequested? changesRequested) + { + this.Id = id; + this.ChangesRequested = changesRequested; + } + + /// + /// Initializes a new instance of the class. + /// + public AuditLogRateLimitUpdated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.Json.g.verified.cs new file mode 100644 index 0000000000..b42e77bd60 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.AuditLogRateLimitUpdatedChangesRequested.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class AuditLogRateLimitUpdatedChangesRequested + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.AuditLogRateLimitUpdatedChangesRequested? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.AuditLogRateLimitUpdatedChangesRequested), + jsonSerializerContext) as global::G.AuditLogRateLimitUpdatedChangesRequested; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.AuditLogRateLimitUpdatedChangesRequested? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.AuditLogRateLimitUpdatedChangesRequested), + jsonSerializerContext).ConfigureAwait(false)) as global::G.AuditLogRateLimitUpdatedChangesRequested; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.g.verified.cs new file mode 100644 index 0000000000..51fc83087e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.AuditLogRateLimitUpdatedChangesRequested.g.verified.cs @@ -0,0 +1,99 @@ +//HintName: G.Models.AuditLogRateLimitUpdatedChangesRequested.g.cs + +#nullable enable + +namespace G +{ + /// + /// The payload used to update the rate limits. + /// + public sealed partial class AuditLogRateLimitUpdatedChangesRequested + { + /// + /// The maximum requests per minute. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_requests_per_1_minute")] + public int? MaxRequestsPer1Minute { get; set; } + + /// + /// The maximum tokens per minute. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_tokens_per_1_minute")] + public int? MaxTokensPer1Minute { get; set; } + + /// + /// The maximum images per minute. Only relevant for certain models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_images_per_1_minute")] + public int? MaxImagesPer1Minute { get; set; } + + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_audio_megabytes_per_1_minute")] + public int? MaxAudioMegabytesPer1Minute { get; set; } + + /// + /// The maximum requests per day. Only relevant for certain models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_requests_per_1_day")] + public int? MaxRequestsPer1Day { get; set; } + + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("batch_1_day_max_input_tokens")] + public int? Batch1DayMaxInputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only relevant for certain models. + /// + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + /// + /// The maximum requests per day. Only relevant for certain models. + /// + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public AuditLogRateLimitUpdatedChangesRequested( + int? maxRequestsPer1Minute, + int? maxTokensPer1Minute, + int? maxImagesPer1Minute, + int? maxAudioMegabytesPer1Minute, + int? maxRequestsPer1Day, + int? 
batch1DayMaxInputTokens) + { + this.MaxRequestsPer1Minute = maxRequestsPer1Minute; + this.MaxTokensPer1Minute = maxTokensPer1Minute; + this.MaxImagesPer1Minute = maxImagesPer1Minute; + this.MaxAudioMegabytesPer1Minute = maxAudioMegabytesPer1Minute; + this.MaxRequestsPer1Day = maxRequestsPer1Day; + this.Batch1DayMaxInputTokens = batch1DayMaxInputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public AuditLogRateLimitUpdatedChangesRequested() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Batch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Batch.g.verified.cs index c27300b59d..2e0943974e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Batch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Batch.g.verified.cs @@ -141,7 +141,7 @@ public sealed partial class Batch public global::G.BatchRequestCounts? RequestCounts { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -209,7 +209,7 @@ public sealed partial class Batch /// The request counts for different statuses within the batch. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public Batch( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.BatchMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.BatchMetadata.g.verified.cs index 7186a89f36..0f0eef9d1b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.BatchMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.BatchMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class BatchMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionModalitie.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionModalitie.g.verified.cs new file mode 100644 index 0000000000..01e502708c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionModalitie.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.ChatCompletionModalitie.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum ChatCompletionModalitie + { + /// + /// + /// + Text, + /// + /// + /// + Audio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionModalitieExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ChatCompletionModalitie value) + { + return value switch + { + ChatCompletionModalitie.Text => "text", + ChatCompletionModalitie.Audio => "audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionModalitie? ToEnum(string value) + { + return value switch + { + "text" => ChatCompletionModalitie.Text, + "audio" => ChatCompletionModalitie.Audio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessage.g.verified.cs index 2aef94a536..b500c481e0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessage.g.verified.cs @@ -37,6 +37,13 @@ public sealed partial class ChatCompletionRequestAssistantMessage [global::System.Text.Json.Serialization.JsonPropertyName("name")] public string? Name { get; set; } + /// + /// Data about a previous audio response from the model.
+    /// [Learn more](/docs/guides/audio).
+    ///
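// A minimal usage sketch (not generated output): the new Audio property lets a
// follow-up assistant message reference a previously returned audio response.
// "audio_abc123" is a placeholder id, and the message instance is assumed to
// come from caller code elsewhere.
internal static class AssistantAudioReferenceSketch
{
    internal static void AttachPreviousAudio(G.ChatCompletionRequestAssistantMessage message)
    {
        // Point the model at the earlier audio response by its identifier.
        message.Audio = new G.ChatCompletionRequestAssistantMessageAudio(id: "audio_abc123");
    }
}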
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public global::G.ChatCompletionRequestAssistantMessageAudio? Audio { get; set; } + /// /// The tool calls generated by the model, such as function calls. /// @@ -71,6 +78,10 @@ public sealed partial class ChatCompletionRequestAssistantMessage /// /// An optional name for the participant. Provides the model information to differentiate between participants of the same role. /// + /// + /// Data about a previous audio response from the model.
+ /// [Learn more](/docs/guides/audio). + /// /// /// The tool calls generated by the model, such as function calls. /// @@ -80,12 +91,14 @@ public ChatCompletionRequestAssistantMessage( string? refusal, global::G.ChatCompletionRequestAssistantMessageRole role, string? name, + global::G.ChatCompletionRequestAssistantMessageAudio? audio, global::System.Collections.Generic.IList? toolCalls) { this.Content = content; this.Refusal = refusal; this.Role = role; this.Name = name; + this.Audio = audio; this.ToolCalls = toolCalls; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.Json.g.verified.cs new file mode 100644 index 0000000000..ae4d008cab --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.ChatCompletionRequestAssistantMessageAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ChatCompletionRequestAssistantMessageAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.ChatCompletionRequestAssistantMessageAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.ChatCompletionRequestAssistantMessageAudio), + jsonSerializerContext) as global::G.ChatCompletionRequestAssistantMessageAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ChatCompletionRequestAssistantMessageAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.ChatCompletionRequestAssistantMessageAudio), + jsonSerializerContext).ConfigureAwait(false)) as global::G.ChatCompletionRequestAssistantMessageAudio; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.g.verified.cs new file mode 100644 index 0000000000..80ac87aecb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageAudio.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.ChatCompletionRequestAssistantMessageAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// Data about a previous audio response from the model.
+    /// [Learn more](/docs/guides/audio).
+    ///
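// A minimal sketch of the JSON helpers generated above for this type, assuming
// reflection-based System.Text.Json is acceptable here; native AOT callers would
// use the JsonSerializerContext overloads with a source-generated context instead.
// "audio_abc123" is a placeholder id.
internal static class AssistantAudioJsonSketch
{
    internal static G.ChatCompletionRequestAssistantMessageAudio? RoundTrip()
    {
        var audio = new G.ChatCompletionRequestAssistantMessageAudio(id: "audio_abc123");
        string json = audio.ToJson();          // roughly {"id":"audio_abc123"}
        return G.ChatCompletionRequestAssistantMessageAudio.FromJson(json);
    }
}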
+ public sealed partial class ChatCompletionRequestAssistantMessageAudio + { + /// + /// Unique identifier for a previous audio response from the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Id { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Unique identifier for a previous audio response from the model. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public ChatCompletionRequestAssistantMessageAudio( + string id) + { + this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); + } + + /// + /// Initializes a new instance of the class. + /// + public ChatCompletionRequestAssistantMessageAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageContentPart.g.verified.cs index abe3683784..32db3c27d6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestAssistantMessageContentPart.g.verified.cs @@ -16,7 +16,7 @@ namespace G public global::G.ChatCompletionRequestAssistantMessageContentPartDiscriminatorType? Type { get; } /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartText? Text { get; init; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.Json.g.verified.cs new file mode 100644 index 0000000000..95187240fc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ChatCompletionRequestMessageContentPartAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.ChatCompletionRequestMessageContentPartAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.ChatCompletionRequestMessageContentPartAudio), + jsonSerializerContext) as global::G.ChatCompletionRequestMessageContentPartAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ChatCompletionRequestMessageContentPartAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.ChatCompletionRequestMessageContentPartAudio), + jsonSerializerContext).ConfigureAwait(false)) as global::G.ChatCompletionRequestMessageContentPartAudio; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.g.verified.cs new file mode 100644 index 0000000000..91045a904a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudio.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// Learn about [audio inputs](/docs/guides/audio). + /// + public sealed partial class ChatCompletionRequestMessageContentPartAudio + { + /// + /// The type of the content part. Always `input_audio`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeJsonConverter))] + public global::G.ChatCompletionRequestMessageContentPartAudioType Type { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.ChatCompletionRequestMessageContentPartAudioInputAudio InputAudio { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the content part. Always `input_audio`. + /// + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public ChatCompletionRequestMessageContentPartAudio( + global::G.ChatCompletionRequestMessageContentPartAudioInputAudio inputAudio, + global::G.ChatCompletionRequestMessageContentPartAudioType type) + { + this.InputAudio = inputAudio ?? throw new global::System.ArgumentNullException(nameof(inputAudio)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public ChatCompletionRequestMessageContentPartAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.Json.g.verified.cs new file mode 100644 index 0000000000..f5047c450a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ChatCompletionRequestMessageContentPartAudioInputAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.ChatCompletionRequestMessageContentPartAudioInputAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.ChatCompletionRequestMessageContentPartAudioInputAudio), + jsonSerializerContext) as global::G.ChatCompletionRequestMessageContentPartAudioInputAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ChatCompletionRequestMessageContentPartAudioInputAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.ChatCompletionRequestMessageContentPartAudioInputAudio), + jsonSerializerContext).ConfigureAwait(false)) as global::G.ChatCompletionRequestMessageContentPartAudioInputAudio; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.verified.cs new file mode 100644 index 0000000000..b38369fe12 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudioInputAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class ChatCompletionRequestMessageContentPartAudioInputAudio + { + /// + /// Base64 encoded audio data. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("data")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Data { get; set; } + + /// + /// The format of the encoded audio data. Currently supports "wav" and "mp3". + /// + [global::System.Text.Json.Serialization.JsonPropertyName("format")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat Format { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Base64 encoded audio data. + /// + /// + /// The format of the encoded audio data. Currently supports "wav" and "mp3". + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public ChatCompletionRequestMessageContentPartAudioInputAudio( + string data, + global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat format) + { + this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.Format = format; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public ChatCompletionRequestMessageContentPartAudioInputAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs new file mode 100644 index 0000000000..c92b5e20df --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs + +#nullable enable + +namespace G +{ + /// + /// The format of the encoded audio data. Currently supports "wav" and "mp3". + /// + public enum ChatCompletionRequestMessageContentPartAudioInputAudioFormat + { + /// + /// + /// + Wav, + /// + /// + /// + Mp3, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ChatCompletionRequestMessageContentPartAudioInputAudioFormat value) + { + return value switch + { + ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Wav => "wav", + ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Mp3 => "mp3", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionRequestMessageContentPartAudioInputAudioFormat? ToEnum(string value) + { + return value switch + { + "wav" => ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Wav, + "mp3" => ChatCompletionRequestMessageContentPartAudioInputAudioFormat.Mp3, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs new file mode 100644 index 0000000000..85ece0934d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.ChatCompletionRequestMessageContentPartAudioType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the content part. Always `input_audio`. + /// + public enum ChatCompletionRequestMessageContentPartAudioType + { + /// + /// + /// + InputAudio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ChatCompletionRequestMessageContentPartAudioTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ChatCompletionRequestMessageContentPartAudioType value) + { + return value switch + { + ChatCompletionRequestMessageContentPartAudioType.InputAudio => "input_audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ChatCompletionRequestMessageContentPartAudioType? 
ToEnum(string value) + { + return value switch + { + "input_audio" => ChatCompletionRequestMessageContentPartAudioType.InputAudio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImage.g.verified.cs index 0eaac6720e..8e308a0c57 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImage.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// + /// Learn about [image inputs](/docs/guides/vision). /// public sealed partial class ChatCompletionRequestMessageContentPartImage { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.verified.cs index a9a6c3cbc0..6f1273ade0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrl.g.verified.cs @@ -17,7 +17,7 @@ public sealed partial class ChatCompletionRequestMessageContentPartImageImageUrl public required string Url { get; set; } /// - /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
/// Default Value: auto ///
[global::System.Text.Json.Serialization.JsonPropertyName("detail")] @@ -37,7 +37,7 @@ public sealed partial class ChatCompletionRequestMessageContentPartImageImageUrl /// Either a URL of the image or the base64 encoded image data. /// /// - /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
/// Default Value: auto /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.verified.cs index 394d80924f..2a3dffd3bb 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartImageImageUrlDetail.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ /// Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).
/// Default Value: auto ///
public enum ChatCompletionRequestMessageContentPartImageImageUrlDetail diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartText.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartText.g.verified.cs index 649e8fff7d..4866c54c15 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartText.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestMessageContentPartText.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// public sealed partial class ChatCompletionRequestMessageContentPartText { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestSystemMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestSystemMessageContentPart.g.verified.cs index a9d8656e1e..a8fbe6d5ea 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestSystemMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestSystemMessageContentPart.g.verified.cs @@ -11,7 +11,7 @@ namespace G public readonly partial struct ChatCompletionRequestSystemMessageContentPart : global::System.IEquatable { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartText? Text { get; init; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestToolMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestToolMessageContentPart.g.verified.cs index 12c745b2c9..b9ee46522f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestToolMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestToolMessageContentPart.g.verified.cs @@ -11,7 +11,7 @@ namespace G public readonly partial struct ChatCompletionRequestToolMessageContentPart : global::System.IEquatable { /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartText? Text { get; init; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestUserMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestUserMessageContentPart.g.verified.cs index b71a513153..ad9952ba45 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestUserMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestUserMessageContentPart.g.verified.cs @@ -16,7 +16,7 @@ namespace G public global::G.ChatCompletionRequestUserMessageContentPartDiscriminatorType? Type { get; } /// - /// + /// Learn about [text inputs](/docs/guides/text-generation). 
/// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartText? Text { get; init; } @@ -51,7 +51,7 @@ public ChatCompletionRequestUserMessageContentPart(global::G.ChatCompletionReque } /// - /// + /// Learn about [image inputs](/docs/guides/vision). /// #if NET6_0_OR_GREATER public global::G.ChatCompletionRequestMessageContentPartImage? ImageUrl { get; init; } @@ -85,25 +85,63 @@ public ChatCompletionRequestUserMessageContentPart(global::G.ChatCompletionReque ImageUrl = value; } + /// + /// Learn about [audio inputs](/docs/guides/audio). + /// +#if NET6_0_OR_GREATER + public global::G.ChatCompletionRequestMessageContentPartAudio? InputAudio { get; init; } +#else + public global::G.ChatCompletionRequestMessageContentPartAudio? InputAudio { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(InputAudio))] +#endif + public bool IsInputAudio => InputAudio != null; + + /// + /// + /// + public static implicit operator ChatCompletionRequestUserMessageContentPart(global::G.ChatCompletionRequestMessageContentPartAudio value) => new ChatCompletionRequestUserMessageContentPart(value); + + /// + /// + /// + public static implicit operator global::G.ChatCompletionRequestMessageContentPartAudio?(ChatCompletionRequestUserMessageContentPart @this) => @this.InputAudio; + + /// + /// + /// + public ChatCompletionRequestUserMessageContentPart(global::G.ChatCompletionRequestMessageContentPartAudio? value) + { + InputAudio = value; + } + /// /// /// public ChatCompletionRequestUserMessageContentPart( global::G.ChatCompletionRequestUserMessageContentPartDiscriminatorType? type, global::G.ChatCompletionRequestMessageContentPartText? text, - global::G.ChatCompletionRequestMessageContentPartImage? imageUrl + global::G.ChatCompletionRequestMessageContentPartImage? imageUrl, + global::G.ChatCompletionRequestMessageContentPartAudio? inputAudio ) { Type = type; Text = text; ImageUrl = imageUrl; + InputAudio = inputAudio; } /// /// /// public object? Object => + InputAudio as object ?? ImageUrl as object ?? Text as object ; @@ -113,7 +151,7 @@ Text as object ///
public bool Validate() { - return IsText && !IsImageUrl || !IsText && IsImageUrl; + return IsText && !IsImageUrl && !IsInputAudio || !IsText && IsImageUrl && !IsInputAudio || !IsText && !IsImageUrl && IsInputAudio; } /// @@ -122,6 +160,7 @@ public bool Validate() public TResult? Match( global::System.Func? text = null, global::System.Func? imageUrl = null, + global::System.Func? inputAudio = null, bool validate = true) { if (validate) @@ -137,6 +176,10 @@ public bool Validate() { return imageUrl(ImageUrl!); } + else if (IsInputAudio && inputAudio != null) + { + return inputAudio(InputAudio!); + } return default(TResult); } @@ -147,6 +190,7 @@ public bool Validate() public void Match( global::System.Action? text = null, global::System.Action? imageUrl = null, + global::System.Action? inputAudio = null, bool validate = true) { if (validate) @@ -162,6 +206,10 @@ public void Match( { imageUrl?.Invoke(ImageUrl!); } + else if (IsInputAudio) + { + inputAudio?.Invoke(InputAudio!); + } } /// @@ -175,6 +223,8 @@ public override int GetHashCode() typeof(global::G.ChatCompletionRequestMessageContentPartText), ImageUrl, typeof(global::G.ChatCompletionRequestMessageContentPartImage), + InputAudio, + typeof(global::G.ChatCompletionRequestMessageContentPartAudio), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -192,7 +242,8 @@ public bool Equals(ChatCompletionRequestUserMessageContentPart other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(InputAudio, other.InputAudio) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestUserMessageContentPartDiscriminatorType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestUserMessageContentPartDiscriminatorType.g.verified.cs index 8698c47948..c78cf1ad1a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestUserMessageContentPartDiscriminatorType.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionRequestUserMessageContentPartDiscriminatorType.g.verified.cs @@ -17,6 +17,10 @@ public enum ChatCompletionRequestUserMessageContentPartDiscriminatorType /// /// ImageUrl, + /// + /// + /// + InputAudio, } /// @@ -33,6 +37,7 @@ public static string ToValueString(this ChatCompletionRequestUserMessageContentP { ChatCompletionRequestUserMessageContentPartDiscriminatorType.Text => "text", ChatCompletionRequestUserMessageContentPartDiscriminatorType.ImageUrl => "image_url", + ChatCompletionRequestUserMessageContentPartDiscriminatorType.InputAudio => "input_audio", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } @@ -45,6 +50,7 @@ public static string ToValueString(this ChatCompletionRequestUserMessageContentP { "text" => ChatCompletionRequestUserMessageContentPartDiscriminatorType.Text, "image_url" => ChatCompletionRequestUserMessageContentPartDiscriminatorType.ImageUrl, + "input_audio" => ChatCompletionRequestUserMessageContentPartDiscriminatorType.InputAudio, _ => null, }; } diff --git 
a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessage.g.verified.cs index 031651bf22..0abb91f5c7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessage.g.verified.cs @@ -43,6 +43,13 @@ public sealed partial class ChatCompletionResponseMessage [global::System.Obsolete("This property marked as deprecated.")] public global::G.ChatCompletionResponseMessageFunctionCall? FunctionCall { get; set; } + /// + /// If the audio output modality is requested, this object contains data
+ /// about the audio response from the model. [Learn more](/docs/guides/audio). + ///
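// --------------------------------------------------------------------------
// Illustrative sketch, not produced by this diff: consuming the regenerated
// ChatCompletionRequestUserMessageContentPart union above now that it carries
// a third input_audio case. Only members visible in this change are used
// (Match, the inputAudio branch, and the new discriminator value); the
// Describe helper itself is an assumed example, not generated code.
using G;

internal static class ContentPartSketch
{
    internal static string Describe(ChatCompletionRequestUserMessageContentPart part) =>
        part.Match(
            text: t => "text",
            imageUrl: i => "image_url",
            // branch added by this change; the discriminator string round-trips
            // through the regenerated enum extensions as "input_audio"
            inputAudio: a => ChatCompletionRequestUserMessageContentPartDiscriminatorType
                .InputAudio.ToValueString())
        ?? "unset";
}
// --------------------------------------------------------------------------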
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public global::G.ChatCompletionResponseMessageAudio? Audio { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -64,17 +71,23 @@ public sealed partial class ChatCompletionResponseMessage /// /// The role of the author of this message. /// + /// + /// If the audio output modality is requested, this object contains data
+ /// about the audio response from the model. [Learn more](/docs/guides/audio). + /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ChatCompletionResponseMessage( string? content, string? refusal, global::System.Collections.Generic.IList? toolCalls, - global::G.ChatCompletionResponseMessageRole role) + global::G.ChatCompletionResponseMessageRole role, + global::G.ChatCompletionResponseMessageAudio? audio) { this.Content = content ?? throw new global::System.ArgumentNullException(nameof(content)); this.Refusal = refusal ?? throw new global::System.ArgumentNullException(nameof(refusal)); this.ToolCalls = toolCalls; this.Role = role; + this.Audio = audio; } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessageAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessageAudio.Json.g.verified.cs new file mode 100644 index 0000000000..f9f5331f91 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessageAudio.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.ChatCompletionResponseMessageAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ChatCompletionResponseMessageAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.ChatCompletionResponseMessageAudio? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.ChatCompletionResponseMessageAudio), + jsonSerializerContext) as global::G.ChatCompletionResponseMessageAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ChatCompletionResponseMessageAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.ChatCompletionResponseMessageAudio), + jsonSerializerContext).ConfigureAwait(false)) as global::G.ChatCompletionResponseMessageAudio; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessageAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessageAudio.g.verified.cs new file mode 100644 index 0000000000..bdb72bf24f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ChatCompletionResponseMessageAudio.g.verified.cs @@ -0,0 +1,89 @@ +//HintName: G.Models.ChatCompletionResponseMessageAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// If the audio output modality is requested, this object contains data
+ /// about the audio response from the model. [Learn more](/docs/guides/audio). + ///
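// --------------------------------------------------------------------------
// Illustrative sketch, not produced by this diff: every new model, including
// the ChatCompletionResponseMessageAudio partial defined just above, gains
// context-based ToJson/FromJson helpers suitable for trimming and AOT. The
// AudioJsonContext below is an assumed source-generated context, not part of
// this change; any JsonSerializerContext that registers the type works.
using System.Text.Json.Serialization;

[JsonSerializable(typeof(global::G.ChatCompletionResponseMessageAudio))]
internal partial class AudioJsonContext : JsonSerializerContext
{
}

internal static class AudioJsonSketch
{
    internal static global::G.ChatCompletionResponseMessageAudio? Parse(string json) =>
        global::G.ChatCompletionResponseMessageAudio.FromJson(json, AudioJsonContext.Default);
}
// --------------------------------------------------------------------------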
+ public sealed partial class ChatCompletionResponseMessageAudio + { + /// + /// Unique identifier for this audio response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Id { get; set; } + + /// + /// The Unix timestamp (in seconds) for when this audio response will
+ /// no longer be accessible on the server for use in multi-turn
+ /// conversations. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("expires_at")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UnixTimestampJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.DateTimeOffset ExpiresAt { get; set; } + + /// + /// Base64 encoded audio bytes generated by the model, in the format
+ /// specified in the request. + ///
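// --------------------------------------------------------------------------
// Illustrative sketch, not produced by this diff: one way to consume the audio
// payload modeled here. Data is base64 audio in the requested format, and
// ExpiresAt is surfaced as a DateTimeOffset by the UnixTimestampJsonConverter
// attached above. WriteAudio is an assumed helper, not generated code.
using System;
using System.IO;

internal static class AudioOutputSketch
{
    internal static void WriteAudio(global::G.ChatCompletionResponseMessage message, string path)
    {
        var audio = message.Audio;                        // new nullable property
        if (audio is null) return;                        // audio modality not requested

        // Past this timestamp the id can no longer be referenced in multi-turn calls.
        if (audio.ExpiresAt <= DateTimeOffset.UtcNow) return;

        File.WriteAllBytes(path, Convert.FromBase64String(audio.Data));
        Console.WriteLine($"{audio.Id}: {audio.Transcript}");
    }
}
// --------------------------------------------------------------------------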
+ [global::System.Text.Json.Serialization.JsonPropertyName("data")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Data { get; set; } + + /// + /// Transcript of the audio generated by the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Unique identifier for this audio response. + /// + /// + /// The Unix timestamp (in seconds) for when this audio response will
+ /// no longer be accessible on the server for use in multi-turn
+ /// conversations. + /// + /// + /// Base64 encoded audio bytes generated by the model, in the format
+ /// specified in the request. + /// + /// + /// Transcript of the audio generated by the model. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public ChatCompletionResponseMessageAudio( + string id, + global::System.DateTimeOffset expiresAt, + string data, + string transcript) + { + this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); + this.ExpiresAt = expiresAt; + this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.Transcript = transcript ?? throw new global::System.ArgumentNullException(nameof(transcript)); + } + + /// + /// Initializes a new instance of the class. + /// + public ChatCompletionResponseMessageAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsage.g.verified.cs index cdfb111165..2212c51f16 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsage.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsage.g.verified.cs @@ -30,6 +30,18 @@ public sealed partial class CompletionUsage [global::System.Text.Json.Serialization.JsonRequired] public required int TotalTokens { get; set; } + /// + /// Breakdown of tokens used in a completion. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("completion_tokens_details")] + public global::G.CompletionUsageCompletionTokensDetails? CompletionTokensDetails { get; set; } + + /// + /// Breakdown of tokens used in the prompt. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("prompt_tokens_details")] + public global::G.CompletionUsagePromptTokensDetails? PromptTokensDetails { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -48,15 +60,25 @@ public sealed partial class CompletionUsage /// /// Total number of tokens used in the request (prompt + completion). /// + /// + /// Breakdown of tokens used in a completion. + /// + /// + /// Breakdown of tokens used in the prompt. + /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CompletionUsage( int completionTokens, int promptTokens, - int totalTokens) + int totalTokens, + global::G.CompletionUsageCompletionTokensDetails? completionTokensDetails, + global::G.CompletionUsagePromptTokensDetails? promptTokensDetails) { this.CompletionTokens = completionTokens; this.PromptTokens = promptTokens; this.TotalTokens = totalTokens; + this.CompletionTokensDetails = completionTokensDetails; + this.PromptTokensDetails = promptTokensDetails; } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsageCompletionTokensDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsageCompletionTokensDetails.Json.g.verified.cs new file mode 100644 index 0000000000..5f27a4cfe9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsageCompletionTokensDetails.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CompletionUsageCompletionTokensDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CompletionUsageCompletionTokensDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CompletionUsageCompletionTokensDetails? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CompletionUsageCompletionTokensDetails), + jsonSerializerContext) as global::G.CompletionUsageCompletionTokensDetails; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CompletionUsageCompletionTokensDetails? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CompletionUsageCompletionTokensDetails), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CompletionUsageCompletionTokensDetails; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsageCompletionTokensDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsageCompletionTokensDetails.g.verified.cs new file mode 100644 index 0000000000..7cc3e4a1d5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsageCompletionTokensDetails.g.verified.cs @@ -0,0 +1,87 @@ +//HintName: G.Models.CompletionUsageCompletionTokensDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Breakdown of tokens used in a completion. + /// + public sealed partial class CompletionUsageCompletionTokensDetails + { + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that appeared in the completion. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("accepted_prediction_tokens")] + public int? AcceptedPredictionTokens { get; set; } + + /// + /// Audio input tokens generated by the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_tokens")] + public int? AudioTokens { get; set; } + + /// + /// Tokens generated by the model for reasoning. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("reasoning_tokens")] + public int? ReasoningTokens { get; set; } + + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that did not appear in the completion. However, like
+ /// reasoning tokens, these tokens are still counted in the total
+ /// completion tokens for purposes of billing, output, and context window
+ /// limits. + ///
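// --------------------------------------------------------------------------
// Illustrative sketch, not produced by this diff: null-safe reads of the new
// usage breakdowns. Both detail objects and every counter are optional, so
// callers coalesce. The prompt-side PromptTokensDetails member used here
// (CachedTokens) is added a little further down in this same change;
// Summarize is an assumed helper, not generated code.
internal static class UsageSketch
{
    internal static string Summarize(global::G.CompletionUsage usage)
    {
        var completion = usage.CompletionTokensDetails;   // new optional breakdown
        var prompt = usage.PromptTokensDetails;           // new optional breakdown

        return $"total={usage.TotalTokens}, " +
               $"reasoning={completion?.ReasoningTokens ?? 0}, " +
               $"rejected_prediction={completion?.RejectedPredictionTokens ?? 0}, " +
               $"cached_prompt={prompt?.CachedTokens ?? 0}";
    }
}
// --------------------------------------------------------------------------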
+ [global::System.Text.Json.Serialization.JsonPropertyName("rejected_prediction_tokens")] + public int? RejectedPredictionTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that appeared in the completion. + /// + /// + /// Audio input tokens generated by the model. + /// + /// + /// Tokens generated by the model for reasoning. + /// + /// + /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that did not appear in the completion. However, like
+ /// reasoning tokens, these tokens are still counted in the total
+ /// completion tokens for purposes of billing, output, and context window
+ /// limits. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CompletionUsageCompletionTokensDetails( + int? acceptedPredictionTokens, + int? audioTokens, + int? reasoningTokens, + int? rejectedPredictionTokens) + { + this.AcceptedPredictionTokens = acceptedPredictionTokens; + this.AudioTokens = audioTokens; + this.ReasoningTokens = reasoningTokens; + this.RejectedPredictionTokens = rejectedPredictionTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public CompletionUsageCompletionTokensDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsagePromptTokensDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsagePromptTokensDetails.Json.g.verified.cs new file mode 100644 index 0000000000..5e0f366f83 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsagePromptTokensDetails.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CompletionUsagePromptTokensDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CompletionUsagePromptTokensDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CompletionUsagePromptTokensDetails? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CompletionUsagePromptTokensDetails), + jsonSerializerContext) as global::G.CompletionUsagePromptTokensDetails; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CompletionUsagePromptTokensDetails? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CompletionUsagePromptTokensDetails), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CompletionUsagePromptTokensDetails; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsagePromptTokensDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsagePromptTokensDetails.g.verified.cs new file mode 100644 index 0000000000..fb1c84eb40 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CompletionUsagePromptTokensDetails.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CompletionUsagePromptTokensDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Breakdown of tokens used in the prompt. + /// + public sealed partial class CompletionUsagePromptTokensDetails + { + /// + /// Audio input tokens present in the prompt. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_tokens")] + public int? AudioTokens { get; set; } + + /// + /// Cached tokens present in the prompt. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("cached_tokens")] + public int? 
CachedTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Audio input tokens present in the prompt. + /// + /// + /// Cached tokens present in the prompt. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CompletionUsagePromptTokensDetails( + int? audioTokens, + int? cachedTokens) + { + this.AudioTokens = audioTokens; + this.CachedTokens = cachedTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public CompletionUsagePromptTokensDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ContentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ContentItem.g.verified.cs index 048ae81008..164c4d4b13 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ContentItem.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ContentItem.g.verified.cs @@ -13,15 +13,15 @@ namespace G /// /// /// - public global::G.MessageObjectContentItemDiscriminatorType? Type { get; } + public global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? Type { get; } /// /// References an image [File](/docs/api-reference/files) in the content of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageContentImageFileObject? ImageFile { get; init; } + public global::G.MessageDeltaContentImageFileObject? ImageFile { get; init; } #else - public global::G.MessageContentImageFileObject? ImageFile { get; } + public global::G.MessageDeltaContentImageFileObject? ImageFile { get; } #endif /// @@ -35,152 +35,152 @@ namespace G /// /// /// - public static implicit operator ContentItem(global::G.MessageContentImageFileObject value) => new ContentItem(value); + public static implicit operator ContentItem(global::G.MessageDeltaContentImageFileObject value) => new ContentItem(value); /// /// /// - public static implicit operator global::G.MessageContentImageFileObject?(ContentItem @this) => @this.ImageFile; + public static implicit operator global::G.MessageDeltaContentImageFileObject?(ContentItem @this) => @this.ImageFile; /// /// /// - public ContentItem(global::G.MessageContentImageFileObject? value) + public ContentItem(global::G.MessageDeltaContentImageFileObject? value) { ImageFile = value; } /// - /// References an image URL in the content of a message. + /// The text content that is part of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageContentImageUrlObject? ImageUrl { get; init; } + public global::G.MessageDeltaContentTextObject? Text { get; init; } #else - public global::G.MessageContentImageUrlObject? ImageUrl { get; } + public global::G.MessageDeltaContentTextObject? 
Text { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] #endif - public bool IsImageUrl => ImageUrl != null; + public bool IsText => Text != null; /// /// /// - public static implicit operator ContentItem(global::G.MessageContentImageUrlObject value) => new ContentItem(value); + public static implicit operator ContentItem(global::G.MessageDeltaContentTextObject value) => new ContentItem(value); /// /// /// - public static implicit operator global::G.MessageContentImageUrlObject?(ContentItem @this) => @this.ImageUrl; + public static implicit operator global::G.MessageDeltaContentTextObject?(ContentItem @this) => @this.Text; /// /// /// - public ContentItem(global::G.MessageContentImageUrlObject? value) + public ContentItem(global::G.MessageDeltaContentTextObject? value) { - ImageUrl = value; + Text = value; } /// - /// The text content that is part of a message. + /// The refusal content that is part of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageContentTextObject? Text { get; init; } + public global::G.MessageDeltaContentRefusalObject? Refusal { get; init; } #else - public global::G.MessageContentTextObject? Text { get; } + public global::G.MessageDeltaContentRefusalObject? Refusal { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Refusal))] #endif - public bool IsText => Text != null; + public bool IsRefusal => Refusal != null; /// /// /// - public static implicit operator ContentItem(global::G.MessageContentTextObject value) => new ContentItem(value); + public static implicit operator ContentItem(global::G.MessageDeltaContentRefusalObject value) => new ContentItem(value); /// /// /// - public static implicit operator global::G.MessageContentTextObject?(ContentItem @this) => @this.Text; + public static implicit operator global::G.MessageDeltaContentRefusalObject?(ContentItem @this) => @this.Refusal; /// /// /// - public ContentItem(global::G.MessageContentTextObject? value) + public ContentItem(global::G.MessageDeltaContentRefusalObject? value) { - Text = value; + Refusal = value; } /// - /// The refusal content generated by the assistant. + /// References an image URL in the content of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageContentRefusalObject? Refusal { get; init; } + public global::G.MessageDeltaContentImageUrlObject? ImageUrl { get; init; } #else - public global::G.MessageContentRefusalObject? Refusal { get; } + public global::G.MessageDeltaContentImageUrlObject? 
ImageUrl { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Refusal))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] #endif - public bool IsRefusal => Refusal != null; + public bool IsImageUrl => ImageUrl != null; /// /// /// - public static implicit operator ContentItem(global::G.MessageContentRefusalObject value) => new ContentItem(value); + public static implicit operator ContentItem(global::G.MessageDeltaContentImageUrlObject value) => new ContentItem(value); /// /// /// - public static implicit operator global::G.MessageContentRefusalObject?(ContentItem @this) => @this.Refusal; + public static implicit operator global::G.MessageDeltaContentImageUrlObject?(ContentItem @this) => @this.ImageUrl; /// /// /// - public ContentItem(global::G.MessageContentRefusalObject? value) + public ContentItem(global::G.MessageDeltaContentImageUrlObject? value) { - Refusal = value; + ImageUrl = value; } /// /// /// public ContentItem( - global::G.MessageObjectContentItemDiscriminatorType? type, - global::G.MessageContentImageFileObject? imageFile, - global::G.MessageContentImageUrlObject? imageUrl, - global::G.MessageContentTextObject? text, - global::G.MessageContentRefusalObject? refusal + global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? type, + global::G.MessageDeltaContentImageFileObject? imageFile, + global::G.MessageDeltaContentTextObject? text, + global::G.MessageDeltaContentRefusalObject? refusal, + global::G.MessageDeltaContentImageUrlObject? imageUrl ) { Type = type; ImageFile = imageFile; - ImageUrl = imageUrl; Text = text; Refusal = refusal; + ImageUrl = imageUrl; } /// /// /// public object? Object => + ImageUrl as object ?? Refusal as object ?? Text as object ?? - ImageUrl as object ?? ImageFile as object ; @@ -189,17 +189,17 @@ ImageFile as object /// public bool Validate() { - return IsImageFile && !IsImageUrl && !IsText && !IsRefusal || !IsImageFile && IsImageUrl && !IsText && !IsRefusal || !IsImageFile && !IsImageUrl && IsText && !IsRefusal || !IsImageFile && !IsImageUrl && !IsText && IsRefusal; + return IsImageFile && !IsText && !IsRefusal && !IsImageUrl || !IsImageFile && IsText && !IsRefusal && !IsImageUrl || !IsImageFile && !IsText && IsRefusal && !IsImageUrl || !IsImageFile && !IsText && !IsRefusal && IsImageUrl; } /// /// /// public TResult? Match( - global::System.Func? imageFile = null, - global::System.Func? imageUrl = null, - global::System.Func? text = null, - global::System.Func? refusal = null, + global::System.Func? imageFile = null, + global::System.Func? text = null, + global::System.Func? refusal = null, + global::System.Func? imageUrl = null, bool validate = true) { if (validate) @@ -211,10 +211,6 @@ public bool Validate() { return imageFile(ImageFile!); } - else if (IsImageUrl && imageUrl != null) - { - return imageUrl(ImageUrl!); - } else if (IsText && text != null) { return text(Text!); @@ -223,6 +219,10 @@ public bool Validate() { return refusal(Refusal!); } + else if (IsImageUrl && imageUrl != null) + { + return imageUrl(ImageUrl!); + } return default(TResult); } @@ -231,10 +231,10 @@ public bool Validate() /// ///
public void Match( - global::System.Action? imageFile = null, - global::System.Action? imageUrl = null, - global::System.Action? text = null, - global::System.Action? refusal = null, + global::System.Action? imageFile = null, + global::System.Action? text = null, + global::System.Action? refusal = null, + global::System.Action? imageUrl = null, bool validate = true) { if (validate) @@ -246,10 +246,6 @@ public void Match( { imageFile?.Invoke(ImageFile!); } - else if (IsImageUrl) - { - imageUrl?.Invoke(ImageUrl!); - } else if (IsText) { text?.Invoke(Text!); @@ -258,6 +254,10 @@ public void Match( { refusal?.Invoke(Refusal!); } + else if (IsImageUrl) + { + imageUrl?.Invoke(ImageUrl!); + } } /// @@ -268,13 +268,13 @@ public override int GetHashCode() var fields = new object?[] { ImageFile, - typeof(global::G.MessageContentImageFileObject), - ImageUrl, - typeof(global::G.MessageContentImageUrlObject), + typeof(global::G.MessageDeltaContentImageFileObject), Text, - typeof(global::G.MessageContentTextObject), + typeof(global::G.MessageDeltaContentTextObject), Refusal, - typeof(global::G.MessageContentRefusalObject), + typeof(global::G.MessageDeltaContentRefusalObject), + ImageUrl, + typeof(global::G.MessageDeltaContentImageUrlObject), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -291,10 +291,10 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ContentItem other) { return - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageFile, other.ImageFile) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Refusal, other.Refusal) + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageFile, other.ImageFile) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Refusal, other.Refusal) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ContentItem2.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ContentItem2.g.verified.cs index c472174993..079b89bd27 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ContentItem2.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ContentItem2.g.verified.cs @@ -13,15 +13,15 @@ namespace G /// /// /// - public global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? Type { get; } + public global::G.MessageObjectContentItemDiscriminatorType? Type { get; } /// /// References an image [File](/docs/api-reference/files) in the content of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageDeltaContentImageFileObject? ImageFile { get; init; } + public global::G.MessageContentImageFileObject? ImageFile { get; init; } #else - public global::G.MessageDeltaContentImageFileObject? ImageFile { get; } + public global::G.MessageContentImageFileObject? 
ImageFile { get; } #endif /// @@ -35,152 +35,152 @@ namespace G /// /// /// - public static implicit operator ContentItem2(global::G.MessageDeltaContentImageFileObject value) => new ContentItem2(value); + public static implicit operator ContentItem2(global::G.MessageContentImageFileObject value) => new ContentItem2(value); /// /// /// - public static implicit operator global::G.MessageDeltaContentImageFileObject?(ContentItem2 @this) => @this.ImageFile; + public static implicit operator global::G.MessageContentImageFileObject?(ContentItem2 @this) => @this.ImageFile; /// /// /// - public ContentItem2(global::G.MessageDeltaContentImageFileObject? value) + public ContentItem2(global::G.MessageContentImageFileObject? value) { ImageFile = value; } /// - /// The text content that is part of a message. + /// References an image URL in the content of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageDeltaContentTextObject? Text { get; init; } + public global::G.MessageContentImageUrlObject? ImageUrl { get; init; } #else - public global::G.MessageDeltaContentTextObject? Text { get; } + public global::G.MessageContentImageUrlObject? ImageUrl { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] #endif - public bool IsText => Text != null; + public bool IsImageUrl => ImageUrl != null; /// /// /// - public static implicit operator ContentItem2(global::G.MessageDeltaContentTextObject value) => new ContentItem2(value); + public static implicit operator ContentItem2(global::G.MessageContentImageUrlObject value) => new ContentItem2(value); /// /// /// - public static implicit operator global::G.MessageDeltaContentTextObject?(ContentItem2 @this) => @this.Text; + public static implicit operator global::G.MessageContentImageUrlObject?(ContentItem2 @this) => @this.ImageUrl; /// /// /// - public ContentItem2(global::G.MessageDeltaContentTextObject? value) + public ContentItem2(global::G.MessageContentImageUrlObject? value) { - Text = value; + ImageUrl = value; } /// - /// The refusal content that is part of a message. + /// The text content that is part of a message. /// #if NET6_0_OR_GREATER - public global::G.MessageDeltaContentRefusalObject? Refusal { get; init; } + public global::G.MessageContentTextObject? Text { get; init; } #else - public global::G.MessageDeltaContentRefusalObject? Refusal { get; } + public global::G.MessageContentTextObject? Text { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Refusal))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] #endif - public bool IsRefusal => Refusal != null; + public bool IsText => Text != null; /// /// /// - public static implicit operator ContentItem2(global::G.MessageDeltaContentRefusalObject value) => new ContentItem2(value); + public static implicit operator ContentItem2(global::G.MessageContentTextObject value) => new ContentItem2(value); /// /// /// - public static implicit operator global::G.MessageDeltaContentRefusalObject?(ContentItem2 @this) => @this.Refusal; + public static implicit operator global::G.MessageContentTextObject?(ContentItem2 @this) => @this.Text; /// /// /// - public ContentItem2(global::G.MessageDeltaContentRefusalObject? value) + public ContentItem2(global::G.MessageContentTextObject? 
value) { - Refusal = value; + Text = value; } /// - /// References an image URL in the content of a message. + /// The refusal content generated by the assistant. /// #if NET6_0_OR_GREATER - public global::G.MessageDeltaContentImageUrlObject? ImageUrl { get; init; } + public global::G.MessageContentRefusalObject? Refusal { get; init; } #else - public global::G.MessageDeltaContentImageUrlObject? ImageUrl { get; } + public global::G.MessageContentRefusalObject? Refusal { get; } #endif /// /// /// #if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Refusal))] #endif - public bool IsImageUrl => ImageUrl != null; + public bool IsRefusal => Refusal != null; /// /// /// - public static implicit operator ContentItem2(global::G.MessageDeltaContentImageUrlObject value) => new ContentItem2(value); + public static implicit operator ContentItem2(global::G.MessageContentRefusalObject value) => new ContentItem2(value); /// /// /// - public static implicit operator global::G.MessageDeltaContentImageUrlObject?(ContentItem2 @this) => @this.ImageUrl; + public static implicit operator global::G.MessageContentRefusalObject?(ContentItem2 @this) => @this.Refusal; /// /// /// - public ContentItem2(global::G.MessageDeltaContentImageUrlObject? value) + public ContentItem2(global::G.MessageContentRefusalObject? value) { - ImageUrl = value; + Refusal = value; } /// /// /// public ContentItem2( - global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? type, - global::G.MessageDeltaContentImageFileObject? imageFile, - global::G.MessageDeltaContentTextObject? text, - global::G.MessageDeltaContentRefusalObject? refusal, - global::G.MessageDeltaContentImageUrlObject? imageUrl + global::G.MessageObjectContentItemDiscriminatorType? type, + global::G.MessageContentImageFileObject? imageFile, + global::G.MessageContentImageUrlObject? imageUrl, + global::G.MessageContentTextObject? text, + global::G.MessageContentRefusalObject? refusal ) { Type = type; ImageFile = imageFile; + ImageUrl = imageUrl; Text = text; Refusal = refusal; - ImageUrl = imageUrl; } /// /// /// public object? Object => - ImageUrl as object ?? Refusal as object ?? Text as object ?? + ImageUrl as object ?? ImageFile as object ; @@ -189,17 +189,17 @@ ImageFile as object /// public bool Validate() { - return IsImageFile && !IsText && !IsRefusal && !IsImageUrl || !IsImageFile && IsText && !IsRefusal && !IsImageUrl || !IsImageFile && !IsText && IsRefusal && !IsImageUrl || !IsImageFile && !IsText && !IsRefusal && IsImageUrl; + return IsImageFile && !IsImageUrl && !IsText && !IsRefusal || !IsImageFile && IsImageUrl && !IsText && !IsRefusal || !IsImageFile && !IsImageUrl && IsText && !IsRefusal || !IsImageFile && !IsImageUrl && !IsText && IsRefusal; } /// /// /// public TResult? Match( - global::System.Func? imageFile = null, - global::System.Func? text = null, - global::System.Func? refusal = null, - global::System.Func? imageUrl = null, + global::System.Func? imageFile = null, + global::System.Func? imageUrl = null, + global::System.Func? text = null, + global::System.Func? 
refusal = null, bool validate = true) { if (validate) @@ -211,6 +211,10 @@ public bool Validate() { return imageFile(ImageFile!); } + else if (IsImageUrl && imageUrl != null) + { + return imageUrl(ImageUrl!); + } else if (IsText && text != null) { return text(Text!); @@ -219,10 +223,6 @@ public bool Validate() { return refusal(Refusal!); } - else if (IsImageUrl && imageUrl != null) - { - return imageUrl(ImageUrl!); - } return default(TResult); } @@ -231,10 +231,10 @@ public bool Validate() /// /// public void Match( - global::System.Action? imageFile = null, - global::System.Action? text = null, - global::System.Action? refusal = null, - global::System.Action? imageUrl = null, + global::System.Action? imageFile = null, + global::System.Action? imageUrl = null, + global::System.Action? text = null, + global::System.Action? refusal = null, bool validate = true) { if (validate) @@ -246,6 +246,10 @@ public void Match( { imageFile?.Invoke(ImageFile!); } + else if (IsImageUrl) + { + imageUrl?.Invoke(ImageUrl!); + } else if (IsText) { text?.Invoke(Text!); @@ -254,10 +258,6 @@ public void Match( { refusal?.Invoke(Refusal!); } - else if (IsImageUrl) - { - imageUrl?.Invoke(ImageUrl!); - } } /// @@ -268,13 +268,13 @@ public override int GetHashCode() var fields = new object?[] { ImageFile, - typeof(global::G.MessageDeltaContentImageFileObject), + typeof(global::G.MessageContentImageFileObject), + ImageUrl, + typeof(global::G.MessageContentImageUrlObject), Text, - typeof(global::G.MessageDeltaContentTextObject), + typeof(global::G.MessageContentTextObject), Refusal, - typeof(global::G.MessageDeltaContentRefusalObject), - ImageUrl, - typeof(global::G.MessageDeltaContentImageUrlObject), + typeof(global::G.MessageContentRefusalObject), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -291,10 +291,10 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ContentItem2 other) { return - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageFile, other.ImageFile) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Refusal, other.Refusal) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageFile, other.ImageFile) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Refusal, other.Refusal) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResult.Json.g.verified.cs new file mode 100644 index 0000000000..63b3ffae86 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CostsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CostsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
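// --------------------------------------------------------------------------
// Illustrative sketch, not produced by this diff: after the swap completed
// just above, ContentItem2 is the union over the non-delta message content
// variants (image file, image URL, text, refusal) while ContentItem now wraps
// the delta variants. Named arguments keep a Match call site stable even
// though the generated parameter order changed; Label is an assumed helper.
internal static class MessageContentSketch
{
    internal static string Label(global::G.ContentItem2 item) =>
        item.Match(
            imageFile: f => "image_file",
            imageUrl: u => "image_url",
            text: t => "text",
            refusal: r => "refusal")
        ?? "empty";
}
// --------------------------------------------------------------------------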
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CostsResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CostsResult), + jsonSerializerContext) as global::G.CostsResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CostsResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CostsResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CostsResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResult.g.verified.cs new file mode 100644 index 0000000000..dea6233781 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResult.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.CostsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated costs details of the specific time bucket. + /// + public sealed partial class CostsResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CostsResultObjectJsonConverter))] + public global::G.CostsResultObject Object { get; set; } + + /// + /// The monetary value in its associated currency. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("amount")] + public global::G.CostsResultAmount? Amount { get; set; } + + /// + /// When `group_by=line_item`, this field provides the line item of the grouped costs result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("line_item")] + public string? LineItem { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped costs result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The monetary value in its associated currency. + /// + /// + /// When `group_by=line_item`, this field provides the line item of the grouped costs result. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped costs result. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CostsResult( + global::G.CostsResultObject @object, + global::G.CostsResultAmount? amount, + string? lineItem, + string? projectId) + { + this.Object = @object; + this.Amount = amount; + this.LineItem = lineItem; + this.ProjectId = projectId; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public CostsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultAmount.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultAmount.Json.g.verified.cs new file mode 100644 index 0000000000..b2c79d06ea --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultAmount.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CostsResultAmount.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CostsResultAmount + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CostsResultAmount? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CostsResultAmount), + jsonSerializerContext) as global::G.CostsResultAmount; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CostsResultAmount? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CostsResultAmount), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CostsResultAmount; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultAmount.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultAmount.g.verified.cs new file mode 100644 index 0000000000..d4666fed4f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultAmount.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.CostsResultAmount.g.cs + +#nullable enable + +namespace G +{ + /// + /// The monetary value in its associated currency. + /// + public sealed partial class CostsResultAmount + { + /// + /// The numeric value of the cost. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("value")] + public double? Value { get; set; } + + /// + /// Lowercase ISO-4217 currency e.g. "usd" + /// + [global::System.Text.Json.Serialization.JsonPropertyName("currency")] + public string? Currency { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The numeric value of the cost. + /// + /// + /// Lowercase ISO-4217 currency e.g. "usd" + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CostsResultAmount( + double? value, + string? currency) + { + this.Value = value; + this.Currency = currency; + } + + /// + /// Initializes a new instance of the class. 
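// --- Editorial usage sketch; not part of the generated snapshot. ---
// The partial above adds ToJson/FromJson helpers to CostsResultAmount. A minimal
// reflection-based round trip looks like this (the JsonSerializerContext overloads are
// the AOT-friendly alternative); the values are made up.
using G;

var amount = new CostsResultAmount(value: 1.25, currency: "usd");
string json = amount.ToJson();                         // e.g. {"value":1.25,"currency":"usd"}
CostsResultAmount? parsed = CostsResultAmount.FromJson(json);
System.Console.WriteLine(parsed?.Currency);            // usd
// --- end of editorial sketch ---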
+ /// + public CostsResultAmount() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultObject.g.verified.cs new file mode 100644 index 0000000000..6fd15ce817 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CostsResultObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CostsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CostsResultObject + { + /// + /// + /// + OrganizationCostsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CostsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CostsResultObject value) + { + return value switch + { + CostsResultObject.OrganizationCostsResult => "organization.costs.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CostsResultObject? ToEnum(string value) + { + return value switch + { + "organization.costs.result" => CostsResultObject.OrganizationCostsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequest.g.verified.cs index 4228101e11..647f47631c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequest.g.verified.cs @@ -12,7 +12,7 @@ namespace G public sealed partial class CreateAssistantRequest { /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o ///
/// gpt-4o @@ -40,7 +40,8 @@ public sealed partial class CreateAssistantRequest public string? Instructions { get; set; } /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] ///
[global::System.Text.Json.Serialization.JsonPropertyName("tools")] public global::System.Collections.Generic.IList? Tools { get; set; } @@ -52,13 +53,13 @@ public sealed partial class CreateAssistantRequest public global::G.CreateAssistantRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -67,7 +68,8 @@ public sealed partial class CreateAssistantRequest public double? Temperature { get; set; } /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
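// --- Editorial usage sketch; not part of the generated snapshot. ---
// The updated doc comments above recommend changing temperature or top_p, but not both.
// A small helper sketch, assuming a G.CreateAssistantRequest built elsewhere with its
// required members already set:
static class SamplingConfig
{
    public static void UseNucleusSamplingOnly(G.CreateAssistantRequest request)
    {
        request.Temperature = null; // leave temperature at its server-side default of 1
        request.TopP = 0.1;         // only tokens in the top 10% probability mass are considered
    }
}
// --- end of editorial sketch ---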
/// Default Value: 1
/// Example: 1 ///
@@ -76,9 +78,9 @@ public sealed partial class CreateAssistantRequest public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] @@ -95,7 +97,7 @@ public sealed partial class CreateAssistantRequest /// Initializes a new instance of the class. ///
/// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: gpt-4o /// /// @@ -108,28 +110,30 @@ public sealed partial class CreateAssistantRequest /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestMetadata.g.verified.cs index be970fa93a..37412c22de 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateAssistantRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestModel.g.verified.cs index e2ae86f5a6..488b2df6f3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestModel.g.verified.cs @@ -16,6 +16,10 @@ public enum CreateAssistantRequestModel /// /// /// + Gpt4o20241120, + /// + /// + /// Gpt4o20240806, /// /// @@ -116,6 +120,7 @@ public static string ToValueString(this CreateAssistantRequestModel value) return value switch { CreateAssistantRequestModel.Gpt4o => "gpt-4o", + CreateAssistantRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateAssistantRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateAssistantRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", CreateAssistantRequestModel.Gpt4oMini => "gpt-4o-mini", @@ -149,6 +154,7 @@ public static string ToValueString(this CreateAssistantRequestModel value) return value switch { "gpt-4o" => CreateAssistantRequestModel.Gpt4o, + "gpt-4o-2024-11-20" => CreateAssistantRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateAssistantRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateAssistantRequestModel.Gpt4o20240513, "gpt-4o-mini" => CreateAssistantRequestModel.Gpt4oMini, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesCodeInterpreter.g.verified.cs index c60dbe7fbf..ac0b39b19c 
100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class CreateAssistantRequestToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
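// --- Editorial usage sketch; not part of the generated snapshot. ---
// The CreateAssistantRequestModel enum earlier in this diff gains a Gpt4o20241120 member
// mapped to "gpt-4o-2024-11-20". The generated extensions convert both ways without
// reflection; the <Enum>Extensions class name is assumed to follow the pattern used
// throughout these snapshots.
using G;

string wire = CreateAssistantRequestModel.Gpt4o20241120.ToValueString();          // "gpt-4o-2024-11-20"
CreateAssistantRequestModel? parsed = CreateAssistantRequestModelExtensions.ToEnum(wire);
// --- end of editorial sketch ---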
[global::System.Text.Json.Serialization.JsonPropertyName("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class CreateAssistantRequestToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateAssistantRequestToolResourcesCodeInterpreter( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStore.g.verified.cs index 9e26800dd9..da6c540144 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStore.g.verified.cs @@ -23,7 +23,7 @@ public sealed partial class CreateAssistantRequestToolResourcesFileSearchVectorS public global::G.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategy? ChunkingStrategy { get; set; } /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -44,7 +44,7 @@ public sealed partial class CreateAssistantRequestToolResourcesFileSearchVectorS /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateAssistantRequestToolResourcesFileSearchVectorStore( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs index 14d10cf7f0..149f9d9cee 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateBatchRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateBatchRequest.g.verified.cs index fb52235632..bac8d29393 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateBatchRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateBatchRequest.g.verified.cs @@ -12,7 +12,7 @@ public sealed partial class CreateBatchRequest /// /// The ID of an uploaded file that contains requests for the new batch.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
- /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. ///
[global::System.Text.Json.Serialization.JsonPropertyName("input_file_id")] [global::System.Text.Json.Serialization.JsonRequired] @@ -51,7 +51,7 @@ public sealed partial class CreateBatchRequest /// /// The ID of an uploaded file that contains requests for the new batch.
/// See [upload file](/docs/api-reference/files/create) for how to upload a file.
- /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. /// /// /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequest.g.verified.cs index 1c3ed2ff09..59542e9e12 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequest.g.verified.cs @@ -12,14 +12,17 @@ namespace G public sealed partial class CreateChatCompletionRequest { /// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). ///
[global::System.Text.Json.Serialization.JsonPropertyName("messages")] [global::System.Text.Json.Serialization.JsonRequired] public required global::System.Collections.Generic.IList Messages { get; set; } /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o ///
/// gpt-4o @@ -28,9 +31,24 @@ public sealed partial class CreateChatCompletionRequest [global::System.Text.Json.Serialization.JsonRequired] public required global::G.AnyOf Model { get; set; } + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("store")] + public bool? Store { get; set; } + + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] + public global::System.Collections.Generic.Dictionary? Metadata { get; set; } + /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("frequency_penalty")] @@ -57,12 +75,19 @@ public sealed partial class CreateChatCompletionRequest public int? TopLogprobs { get; set; } /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
- /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.
+ /// This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning). ///
[global::System.Text.Json.Serialization.JsonPropertyName("max_tokens")] + [global::System.Obsolete("This property marked as deprecated.")] public int? MaxTokens { get; set; } + /// + /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_completion_tokens")] + public int? MaxCompletionTokens { get; set; } + /// /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
/// Default Value: 1
@@ -72,18 +97,46 @@ public sealed partial class CreateChatCompletionRequest [global::System.Text.Json.Serialization.JsonPropertyName("n")] public int? N { get; set; } + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// Configuration for a [Predicted Output](/docs/guides/predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("prediction")] + public global::G.PredictionContent? Prediction { get; set; } + + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public global::G.CreateChatCompletionRequestAudio? Audio { get; set; } + /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("presence_penalty")] public double? PresencePenalty { get; set; } /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] @@ -100,10 +153,12 @@ public sealed partial class CreateChatCompletionRequest /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto ///
[global::System.Text.Json.Serialization.JsonPropertyName("service_tier")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CreateChatCompletionRequestServiceTierJsonConverter))] @@ -168,13 +223,14 @@ public sealed partial class CreateChatCompletionRequest public global::G.ChatCompletionToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true ///
[global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -211,15 +267,27 @@ public sealed partial class CreateChatCompletionRequest /// Initializes a new instance of the class. ///
/// - /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + /// A list of messages comprising the conversation so far. Depending on the
+ /// [model](/docs/models) you use, different message types (modalities) are
+ /// supported, like [text](/docs/guides/text-generation),
+ /// [images](/docs/guides/vision), and [audio](/docs/guides/audio). /// /// - /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
+ /// ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.
/// Example: gpt-4o /// + /// + /// Whether or not to store the output of this chat completion request
+ /// for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.
+ /// Default Value: false + /// + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -233,24 +301,42 @@ public sealed partial class CreateChatCompletionRequest /// /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. /// - /// - /// The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
- /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// + /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). /// /// /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
/// Default Value: 1
/// Example: 1 /// + /// + /// Output types that you would like the model to generate for this request.
+ /// Most models are capable of generating text, which is the default:
+ /// `["text"]`
+ /// The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To
+ /// request that this model generate both text and audio responses, you can
+ /// use:
+ /// `["text", "audio"]` + /// + /// + /// Configuration for a [Predicted Output](/docs/guides/predicted-outputs),
+ /// which can greatly improve response times when large parts of the model
+ /// response are known ahead of time. This is most common when you are
+ /// regenerating a file with only minor changes to most of the content. + /// + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// /// @@ -260,10 +346,12 @@ public sealed partial class CreateChatCompletionRequest /// /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto /// /// /// Up to 4 sequences where the API will stop generating further tokens. @@ -299,22 +387,28 @@ public sealed partial class CreateChatCompletionRequest /// `none` is the default when no tools are present. `auto` is the default if tools are present. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateChatCompletionRequest( global::System.Collections.Generic.IList messages, global::G.AnyOf model, + bool? store, + global::System.Collections.Generic.Dictionary? metadata, double? frequencyPenalty, global::System.Collections.Generic.Dictionary? logitBias, bool? logprobs, int? topLogprobs, - int? maxTokens, + int? maxCompletionTokens, int? n, + global::System.Collections.Generic.IList? modalities, + global::G.PredictionContent? prediction, + global::G.CreateChatCompletionRequestAudio? audio, double? presencePenalty, global::G.ResponseFormat? responseFormat, int? seed, @@ -331,12 +425,17 @@ public CreateChatCompletionRequest( { this.Messages = messages ?? throw new global::System.ArgumentNullException(nameof(messages)); this.Model = model; + this.Store = store; + this.Metadata = metadata; this.FrequencyPenalty = frequencyPenalty; this.LogitBias = logitBias; this.Logprobs = logprobs; this.TopLogprobs = topLogprobs; - this.MaxTokens = maxTokens; + this.MaxCompletionTokens = maxCompletionTokens; this.N = n; + this.Modalities = modalities; + this.Prediction = prediction; + this.Audio = audio; this.PresencePenalty = presencePenalty; this.ResponseFormat = responseFormat; this.Seed = seed; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudio.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudio.Json.g.verified.cs new file mode 100644 index 0000000000..cad414abf4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudio.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CreateChatCompletionRequestAudio.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateChatCompletionRequestAudio + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CreateChatCompletionRequestAudio? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CreateChatCompletionRequestAudio), + jsonSerializerContext) as global::G.CreateChatCompletionRequestAudio; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateChatCompletionRequestAudio? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CreateChatCompletionRequestAudio), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CreateChatCompletionRequestAudio; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudio.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudio.g.verified.cs new file mode 100644 index 0000000000..e30952d19c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudio.g.verified.cs @@ -0,0 +1,62 @@ +//HintName: G.Models.CreateChatCompletionRequestAudio.g.cs + +#nullable enable + +namespace G +{ + /// + /// Parameters for audio output. Required when audio output is requested with
+ /// `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + ///
+ public sealed partial class CreateChatCompletionRequestAudio + { + /// + /// The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CreateChatCompletionRequestAudioVoiceJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.CreateChatCompletionRequestAudioVoice Voice { get; set; } + + /// + /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
+ /// `opus`, or `pcm16`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("format")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CreateChatCompletionRequestAudioFormatJsonConverter))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.CreateChatCompletionRequestAudioFormat Format { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive). + /// + /// + /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
+ /// `opus`, or `pcm16`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CreateChatCompletionRequestAudio( + global::G.CreateChatCompletionRequestAudioVoice voice, + global::G.CreateChatCompletionRequestAudioFormat format) + { + this.Voice = voice; + this.Format = format; + } + + /// + /// Initializes a new instance of the class. + /// + public CreateChatCompletionRequestAudio() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudioFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudioFormat.g.verified.cs new file mode 100644 index 0000000000..c9dbb26d87 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudioFormat.g.verified.cs @@ -0,0 +1,71 @@ +//HintName: G.Models.CreateChatCompletionRequestAudioFormat.g.cs + +#nullable enable + +namespace G +{ + /// + /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
+ /// `opus`, or `pcm16`. + ///
+ public enum CreateChatCompletionRequestAudioFormat + { + /// + /// + /// + Wav, + /// + /// + /// + Mp3, + /// + /// + /// + Flac, + /// + /// + /// + Opus, + /// + /// + /// + Pcm16, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateChatCompletionRequestAudioFormatExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateChatCompletionRequestAudioFormat value) + { + return value switch + { + CreateChatCompletionRequestAudioFormat.Wav => "wav", + CreateChatCompletionRequestAudioFormat.Mp3 => "mp3", + CreateChatCompletionRequestAudioFormat.Flac => "flac", + CreateChatCompletionRequestAudioFormat.Opus => "opus", + CreateChatCompletionRequestAudioFormat.Pcm16 => "pcm16", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateChatCompletionRequestAudioFormat? ToEnum(string value) + { + return value switch + { + "wav" => CreateChatCompletionRequestAudioFormat.Wav, + "mp3" => CreateChatCompletionRequestAudioFormat.Mp3, + "flac" => CreateChatCompletionRequestAudioFormat.Flac, + "opus" => CreateChatCompletionRequestAudioFormat.Opus, + "pcm16" => CreateChatCompletionRequestAudioFormat.Pcm16, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudioVoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudioVoice.g.verified.cs new file mode 100644 index 0000000000..b0ab5eddfa --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestAudioVoice.g.verified.cs @@ -0,0 +1,88 @@ +//HintName: G.Models.CreateChatCompletionRequestAudioVoice.g.cs + +#nullable enable + +namespace G +{ + /// + /// The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive). + /// + public enum CreateChatCompletionRequestAudioVoice + { + /// + /// + /// + Alloy, + /// + /// + /// + Ash, + /// + /// + /// + Ballad, + /// + /// + /// + Coral, + /// + /// + /// + Echo, + /// + /// + /// + Sage, + /// + /// + /// + Shimmer, + /// + /// + /// + Verse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateChatCompletionRequestAudioVoiceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateChatCompletionRequestAudioVoice value) + { + return value switch + { + CreateChatCompletionRequestAudioVoice.Alloy => "alloy", + CreateChatCompletionRequestAudioVoice.Ash => "ash", + CreateChatCompletionRequestAudioVoice.Ballad => "ballad", + CreateChatCompletionRequestAudioVoice.Coral => "coral", + CreateChatCompletionRequestAudioVoice.Echo => "echo", + CreateChatCompletionRequestAudioVoice.Sage => "sage", + CreateChatCompletionRequestAudioVoice.Shimmer => "shimmer", + CreateChatCompletionRequestAudioVoice.Verse => "verse", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateChatCompletionRequestAudioVoice? 
ToEnum(string value) + { + return value switch + { + "alloy" => CreateChatCompletionRequestAudioVoice.Alloy, + "ash" => CreateChatCompletionRequestAudioVoice.Ash, + "ballad" => CreateChatCompletionRequestAudioVoice.Ballad, + "coral" => CreateChatCompletionRequestAudioVoice.Coral, + "echo" => CreateChatCompletionRequestAudioVoice.Echo, + "sage" => CreateChatCompletionRequestAudioVoice.Sage, + "shimmer" => CreateChatCompletionRequestAudioVoice.Shimmer, + "verse" => CreateChatCompletionRequestAudioVoice.Verse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestMetadata.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestMetadata.Json.g.verified.cs new file mode 100644 index 0000000000..93de2308c6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestMetadata.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CreateChatCompletionRequestMetadata.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateChatCompletionRequestMetadata + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CreateChatCompletionRequestMetadata? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CreateChatCompletionRequestMetadata), + jsonSerializerContext) as global::G.CreateChatCompletionRequestMetadata; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateChatCompletionRequestMetadata? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CreateChatCompletionRequestMetadata), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CreateChatCompletionRequestMetadata; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestMetadata.g.verified.cs new file mode 100644 index 0000000000..6eddb9c944 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestMetadata.g.verified.cs @@ -0,0 +1,29 @@ +//HintName: G.Models.CreateChatCompletionRequestMetadata.g.cs + +#nullable enable + +namespace G +{ + /// + /// Developer-defined tags and values used for filtering completions
+ /// in the [dashboard](https://platform.openai.com/chat-completions). + ///
+ public sealed partial class CreateChatCompletionRequestMetadata + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CreateChatCompletionRequestMetadata( + ) + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestModel.g.verified.cs index 663dae0f28..736ae2902a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestModel.g.verified.cs @@ -9,6 +9,22 @@ namespace G ///
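Illustrative usage sketch for the new metadata model and its generated JSON helpers shown above. This is not part of the snapshots; it only relies on members visible in this diff: the parameterless constructor, the [JsonExtensionData] AdditionalProperties bag, and the reflection-based ToJson/FromJson overloads.

```csharp
// Illustrative sketch, not part of the generated snapshots.
// Free-form metadata keys land in the [JsonExtensionData] dictionary.
var metadata = new G.CreateChatCompletionRequestMetadata();
metadata.AdditionalProperties["experiment"] = "nightly-eval";

string json = metadata.ToJson();                                  // {"experiment":"nightly-eval"}
var roundTripped = G.CreateChatCompletionRequestMetadata.FromJson(json);
```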
public enum CreateChatCompletionRequestModel { + /// + /// + /// + O1Preview, + /// + /// + /// + O1Preview20240912, + /// + /// + /// + O1Mini, + /// + /// + /// + O1Mini20240912, /// /// /// @@ -16,6 +32,10 @@ public enum CreateChatCompletionRequestModel /// /// /// + Gpt4o20241120, + /// + /// + /// Gpt4o20240806, /// /// @@ -24,6 +44,26 @@ public enum CreateChatCompletionRequestModel /// /// /// + Gpt4oRealtimePreview, + /// + /// + /// + Gpt4oRealtimePreview20241001, + /// + /// + /// + Gpt4oAudioPreview, + /// + /// + /// + Gpt4oAudioPreview20241001, + /// + /// + /// + Chatgpt4oLatest, + /// + /// + /// Gpt4oMini, /// /// @@ -119,9 +159,19 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) { return value switch { + CreateChatCompletionRequestModel.O1Preview => "o1-preview", + CreateChatCompletionRequestModel.O1Preview20240912 => "o1-preview-2024-09-12", + CreateChatCompletionRequestModel.O1Mini => "o1-mini", + CreateChatCompletionRequestModel.O1Mini20240912 => "o1-mini-2024-09-12", CreateChatCompletionRequestModel.Gpt4o => "gpt-4o", + CreateChatCompletionRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateChatCompletionRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateChatCompletionRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", + CreateChatCompletionRequestModel.Gpt4oRealtimePreview => "gpt-4o-realtime-preview", + CreateChatCompletionRequestModel.Gpt4oRealtimePreview20241001 => "gpt-4o-realtime-preview-2024-10-01", + CreateChatCompletionRequestModel.Gpt4oAudioPreview => "gpt-4o-audio-preview", + CreateChatCompletionRequestModel.Gpt4oAudioPreview20241001 => "gpt-4o-audio-preview-2024-10-01", + CreateChatCompletionRequestModel.Chatgpt4oLatest => "chatgpt-4o-latest", CreateChatCompletionRequestModel.Gpt4oMini => "gpt-4o-mini", CreateChatCompletionRequestModel.Gpt4oMini20240718 => "gpt-4o-mini-2024-07-18", CreateChatCompletionRequestModel.Gpt4Turbo => "gpt-4-turbo", @@ -153,9 +203,19 @@ public static string ToValueString(this CreateChatCompletionRequestModel value) { return value switch { + "o1-preview" => CreateChatCompletionRequestModel.O1Preview, + "o1-preview-2024-09-12" => CreateChatCompletionRequestModel.O1Preview20240912, + "o1-mini" => CreateChatCompletionRequestModel.O1Mini, + "o1-mini-2024-09-12" => CreateChatCompletionRequestModel.O1Mini20240912, "gpt-4o" => CreateChatCompletionRequestModel.Gpt4o, + "gpt-4o-2024-11-20" => CreateChatCompletionRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateChatCompletionRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateChatCompletionRequestModel.Gpt4o20240513, + "gpt-4o-realtime-preview" => CreateChatCompletionRequestModel.Gpt4oRealtimePreview, + "gpt-4o-realtime-preview-2024-10-01" => CreateChatCompletionRequestModel.Gpt4oRealtimePreview20241001, + "gpt-4o-audio-preview" => CreateChatCompletionRequestModel.Gpt4oAudioPreview, + "gpt-4o-audio-preview-2024-10-01" => CreateChatCompletionRequestModel.Gpt4oAudioPreview20241001, + "chatgpt-4o-latest" => CreateChatCompletionRequestModel.Chatgpt4oLatest, "gpt-4o-mini" => CreateChatCompletionRequestModel.Gpt4oMini, "gpt-4o-mini-2024-07-18" => CreateChatCompletionRequestModel.Gpt4oMini20240718, "gpt-4-turbo" => CreateChatCompletionRequestModel.Gpt4Turbo, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestServiceTier.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestServiceTier.g.verified.cs index 
a0fe994f8d..99873422a9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestServiceTier.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionRequestServiceTier.g.verified.cs @@ -6,10 +6,12 @@ namespace G { /// /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
- /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+ /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.<br/>
/// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.<br/>
/// - When not set, the default behavior is 'auto'.
- /// When this parameter is set, the response body will include the `service_tier` utilized. + /// When this parameter is set, the response body will include the `service_tier` utilized.
+ /// Default Value: auto ///
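The enum changes above (the new o1 and gpt-4o model identifiers, plus the expanded service-tier notes) keep the generator's usual string round-trip pattern. A minimal sketch, limited to the ToValueString/ToEnum members shown in this diff:

```csharp
// Illustrative sketch, not part of the generated snapshots.
G.CreateChatCompletionRequestModel? model =
    G.CreateChatCompletionRequestModelExtensions.ToEnum("o1-mini-2024-09-12"); // unknown strings return null
string? wire = model?.ToValueString();                                         // "o1-mini-2024-09-12"
```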
public enum CreateChatCompletionRequestServiceTier { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionResponseChoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionResponseChoice.g.verified.cs index 039076df7a..70b10ffb37 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionResponseChoice.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateChatCompletionResponseChoice.g.verified.cs @@ -38,7 +38,8 @@ public sealed partial class CreateChatCompletionResponseChoice /// Log probability information for the choice. ///
[global::System.Text.Json.Serialization.JsonPropertyName("logprobs")] - public global::G.CreateChatCompletionResponseChoiceLogprobs? Logprobs { get; set; } + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.CreateChatCompletionResponseChoiceLogprobs? Logprobs { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -74,7 +75,7 @@ public CreateChatCompletionResponseChoice( this.FinishReason = finishReason; this.Index = index; this.Message = message ?? throw new global::System.ArgumentNullException(nameof(message)); - this.Logprobs = logprobs; + this.Logprobs = logprobs ?? throw new global::System.ArgumentNullException(nameof(logprobs)); } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateCompletionRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateCompletionRequest.g.verified.cs index 5df55f127a..5e40cd9475 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateCompletionRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateCompletionRequest.g.verified.cs @@ -12,7 +12,7 @@ namespace G public sealed partial class CreateCompletionRequest { /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::System.Text.Json.Serialization.JsonPropertyName("model")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.AnyOfJsonConverter))] @@ -47,7 +47,7 @@ public sealed partial class CreateCompletionRequest /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("frequency_penalty")] @@ -90,7 +90,7 @@ public sealed partial class CreateCompletionRequest /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 ///
[global::System.Text.Json.Serialization.JsonPropertyName("presence_penalty")] @@ -153,7 +153,7 @@ public sealed partial class CreateCompletionRequest public double? TopP { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -170,7 +170,7 @@ public sealed partial class CreateCompletionRequest /// Initializes a new instance of the class. ///
/// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
@@ -189,7 +189,7 @@ public sealed partial class CreateCompletionRequest /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -215,7 +215,7 @@ public sealed partial class CreateCompletionRequest /// /// /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- /// [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+ /// [See more information about frequency and presence penalties.](/docs/guides/text-generation)
/// Default Value: 0 /// /// @@ -250,7 +250,7 @@ public sealed partial class CreateCompletionRequest /// Example: 1 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateEmbeddingRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateEmbeddingRequest.g.verified.cs index 3eab56c05d..8035d88b0c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateEmbeddingRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateEmbeddingRequest.g.verified.cs @@ -22,7 +22,7 @@ public sealed partial class CreateEmbeddingRequest public required global::G.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>> Input { get; set; } /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small ///
/// text-embedding-3-small @@ -48,7 +48,7 @@ public sealed partial class CreateEmbeddingRequest public int? Dimensions { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -69,7 +69,7 @@ public sealed partial class CreateEmbeddingRequest /// Example: The quick brown fox jumped over the lazy dog /// /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.
/// Example: text-embedding-3-small /// /// @@ -81,7 +81,7 @@ public sealed partial class CreateEmbeddingRequest /// The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateFineTuningJobRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateFineTuningJobRequest.g.verified.cs index e2dac63f58..26eae38a09 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateFineTuningJobRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateFineTuningJobRequest.g.verified.cs @@ -13,7 +13,7 @@ public sealed partial class CreateFineTuningJobRequest { /// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini ///
/// gpt-4o-mini @@ -42,7 +42,7 @@ public sealed partial class CreateFineTuningJobRequest public global::G.CreateFineTuningJobRequestHyperparameters? Hyperparameters { get; set; } /// - /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. ///
[global::System.Text.Json.Serialization.JsonPropertyName("suffix")] @@ -88,7 +88,7 @@ public sealed partial class CreateFineTuningJobRequest ///
/// /// The name of the model to fine-tune. You can select one of the
- /// [supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ /// [supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
/// Example: gpt-4o-mini /// /// @@ -103,7 +103,7 @@ public sealed partial class CreateFineTuningJobRequest /// The hyperparameters used for the fine-tuning job. /// /// - /// A string of up to 18 characters that will be added to your fine-tuned model name.
+ /// A string of up to 64 characters that will be added to your fine-tuned model name.
/// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageEditRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageEditRequest.g.verified.cs index 61301b1ccb..c4b7618e86 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageEditRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageEditRequest.g.verified.cs @@ -86,7 +86,7 @@ public sealed partial class CreateImageEditRequest public global::G.CreateImageEditRequestResponseFormat? ResponseFormat { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -139,7 +139,7 @@ public sealed partial class CreateImageEditRequest /// Example: url /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageRequest.g.verified.cs index d41a34792a..4e0e1e0000 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageRequest.g.verified.cs @@ -80,7 +80,7 @@ public sealed partial class CreateImageRequest public global::G.CreateImageRequestStyle? Style { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -131,7 +131,7 @@ public sealed partial class CreateImageRequest /// Example: vivid /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageVariationRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageVariationRequest.g.verified.cs index 319e646d54..4330e0940b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageVariationRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateImageVariationRequest.g.verified.cs @@ -65,7 +65,7 @@ public sealed partial class CreateImageVariationRequest public global::G.CreateImageVariationRequestSize? Size { get; set; } /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 ///
/// user-1234 @@ -108,7 +108,7 @@ public sealed partial class CreateImageVariationRequest /// Example: 1024x1024 /// /// - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).
/// Example: user-1234 /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequest.g.verified.cs index ce4a2cdbd5..7e42d2dbd2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequest.g.verified.cs @@ -36,7 +36,7 @@ public sealed partial class CreateMessageRequest public global::System.Collections.Generic.IList? Attachments { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -60,7 +60,7 @@ public sealed partial class CreateMessageRequest /// A list of files attached to the message, and the tools they should be added to. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateMessageRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequestAttachment.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequestAttachment.g.verified.cs index b3a5f4c0b8..58039f503f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequestAttachment.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequestAttachment.g.verified.cs @@ -19,7 +19,7 @@ public sealed partial class CreateMessageRequestAttachment /// The tools to add this file to. ///
[global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -39,7 +39,7 @@ public sealed partial class CreateMessageRequestAttachment [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateMessageRequestAttachment( string? fileId, - global::System.Collections.Generic.IList? tools) + global::System.Collections.Generic.IList? tools) { this.FileId = fileId; this.Tools = tools; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequestMetadata.g.verified.cs index 319631a250..457f8eafcd 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateMessageRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateMessageRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequest.g.verified.cs index 8195f35cc0..34eec6402d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequest.g.verified.cs @@ -12,20 +12,22 @@ namespace G public sealed partial class CreateModerationRequest { /// - /// The input text to classify + /// Input (or inputs) to classify. Can be a single string, an array of strings, or
+ /// an array of multi-modal input objects similar to other models. ///
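The widened Input shape is easiest to see with the image variant types introduced later in this diff. A small construction sketch (illustrative only, not part of the snapshots, and limited to constructors and helpers shown in this diff):

```csharp
// Illustrative sketch, not part of the generated snapshots.
// Builds the multi-modal image item defined later in this diff and serializes it
// with the generated reflection-based ToJson overload.
var imageItem = new G.CreateModerationRequestInputVariant3ItemVariant1(
    imageUrl: new G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl(
        url: "https://example.com/image.jpg"),
    type: G.CreateModerationRequestInputVariant3ItemVariant1Type.ImageUrl);

// Roughly: {"type":"image_url","image_url":{"url":"https://example.com/image.jpg"}}
string json = imageItem.ToJson();
```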
[global::System.Text.Json.Serialization.JsonPropertyName("input")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.OneOfJsonConverter>))] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.OneOfJsonConverter, global::System.Collections.Generic.IList>))] [global::System.Text.Json.Serialization.JsonRequired] - public required global::G.OneOf> Input { get; set; } + public required global::G.OneOf, global::System.Collections.Generic.IList> Input { get; set; } /// - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
- /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- /// Default Value: text-moderation-latest
- /// Example: text-moderation-stable + /// The content moderation model you would like to use. Learn more in
+ /// [the moderation guide](/docs/guides/moderation), and learn about
+ /// available models [here](/docs/models#moderation).
+ /// Default Value: omni-moderation-latest
+ /// Example: omni-moderation-2024-09-26 ///
- /// text-moderation-stable + /// omni-moderation-2024-09-26 [global::System.Text.Json.Serialization.JsonPropertyName("model")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.AnyOfJsonConverter))] public global::G.AnyOf? Model { get; set; } @@ -40,17 +42,19 @@ public sealed partial class CreateModerationRequest /// Initializes a new instance of the class. ///
/// - /// The input text to classify + /// Input (or inputs) to classify. Can be a single string, an array of strings, or
+ /// an array of multi-modal input objects similar to other models. /// /// - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
- /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- /// Default Value: text-moderation-latest
- /// Example: text-moderation-stable + /// The content moderation model you would like to use. Learn more in
+ /// [the moderation guide](/docs/guides/moderation), and learn about
+ /// available models [here](/docs/models#moderation).
+ /// Default Value: omni-moderation-latest
+ /// Example: omni-moderation-2024-09-26 /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateModerationRequest( - global::G.OneOf> input, + global::G.OneOf, global::System.Collections.Generic.IList> input, global::G.AnyOf? model) { this.Input = input; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.Json.g.verified.cs new file mode 100644 index 0000000000..b9639e48be --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationRequestInputVariant3ItemDiscriminator + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CreateModerationRequestInputVariant3ItemDiscriminator? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CreateModerationRequestInputVariant3ItemDiscriminator), + jsonSerializerContext) as global::G.CreateModerationRequestInputVariant3ItemDiscriminator; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationRequestInputVariant3ItemDiscriminator? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CreateModerationRequestInputVariant3ItemDiscriminator), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CreateModerationRequestInputVariant3ItemDiscriminator; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.g.verified.cs new file mode 100644 index 0000000000..379e2672c0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.g.verified.cs @@ -0,0 +1,43 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemDiscriminator.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class CreateModerationRequestInputVariant3ItemDiscriminator + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorTypeJsonConverter))] + public global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. 
+ /// + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CreateModerationRequestInputVariant3ItemDiscriminator( + global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? type) + { + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public CreateModerationRequestInputVariant3ItemDiscriminator() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs new file mode 100644 index 0000000000..20b6bdeb3b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationRequestInputVariant3ItemDiscriminatorType + { + /// + /// + /// + ImageUrl, + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationRequestInputVariant3ItemDiscriminatorTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationRequestInputVariant3ItemDiscriminatorType value) + { + return value switch + { + CreateModerationRequestInputVariant3ItemDiscriminatorType.ImageUrl => "image_url", + CreateModerationRequestInputVariant3ItemDiscriminatorType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationRequestInputVariant3ItemDiscriminatorType? ToEnum(string value) + { + return value switch + { + "image_url" => CreateModerationRequestInputVariant3ItemDiscriminatorType.ImageUrl, + "text" => CreateModerationRequestInputVariant3ItemDiscriminatorType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.Json.g.verified.cs new file mode 100644 index 0000000000..6686212b11 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationRequestInputVariant3ItemVariant1 + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CreateModerationRequestInputVariant3ItemVariant1? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1), + jsonSerializerContext) as global::G.CreateModerationRequestInputVariant3ItemVariant1; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationRequestInputVariant3ItemVariant1? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CreateModerationRequestInputVariant3ItemVariant1; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.g.verified.cs new file mode 100644 index 0000000000..917a746037 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1.g.cs + +#nullable enable + +namespace G +{ + /// + /// An object describing an image to classify. + /// + public sealed partial class CreateModerationRequestInputVariant3ItemVariant1 + { + /// + /// Always `image_url`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeJsonConverter))] + public global::G.CreateModerationRequestInputVariant3ItemVariant1Type Type { get; set; } + + /// + /// Contains either an image URL or a data URL for a base64 encoded image. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("image_url")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl ImageUrl { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Always `image_url`. + /// + /// + /// Contains either an image URL or a data URL for a base64 encoded image. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CreateModerationRequestInputVariant3ItemVariant1( + global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl imageUrl, + global::G.CreateModerationRequestInputVariant3ItemVariant1Type type) + { + this.ImageUrl = imageUrl ?? throw new global::System.ArgumentNullException(nameof(imageUrl)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public CreateModerationRequestInputVariant3ItemVariant1() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.Json.g.verified.cs new file mode 100644 index 0000000000..b0aae18f32 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationRequestInputVariant3ItemVariant1ImageUrl + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl), + jsonSerializerContext) as global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.g.verified.cs new file mode 100644 index 0000000000..082aaded83 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.g.verified.cs @@ -0,0 +1,48 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1ImageUrl.g.cs + +#nullable enable + +namespace G +{ + /// + /// Contains either an image URL or a data URL for a base64 encoded image. + /// + public sealed partial class CreateModerationRequestInputVariant3ItemVariant1ImageUrl + { + /// + /// Either a URL of the image or the base64 encoded image data.
+ /// Example: https://example.com/image.jpg + ///
+ /// https://example.com/image.jpg + [global::System.Text.Json.Serialization.JsonPropertyName("url")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Url { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Either a URL of the image or the base64 encoded image data.
+ /// Example: https://example.com/image.jpg + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CreateModerationRequestInputVariant3ItemVariant1ImageUrl( + string url) + { + this.Url = url ?? throw new global::System.ArgumentNullException(nameof(url)); + } + + /// + /// Initializes a new instance of the class. + /// + public CreateModerationRequestInputVariant3ItemVariant1ImageUrl() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs new file mode 100644 index 0000000000..bb71fc33e6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant1Type.g.cs + +#nullable enable + +namespace G +{ + /// + /// Always `image_url`. + /// + public enum CreateModerationRequestInputVariant3ItemVariant1Type + { + /// + /// + /// + ImageUrl, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationRequestInputVariant3ItemVariant1TypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationRequestInputVariant3ItemVariant1Type value) + { + return value switch + { + CreateModerationRequestInputVariant3ItemVariant1Type.ImageUrl => "image_url", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationRequestInputVariant3ItemVariant1Type? ToEnum(string value) + { + return value switch + { + "image_url" => CreateModerationRequestInputVariant3ItemVariant1Type.ImageUrl, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.Json.g.verified.cs new file mode 100644 index 0000000000..e404979b3b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant2.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationRequestInputVariant3ItemVariant2 + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CreateModerationRequestInputVariant3ItemVariant2? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant2), + jsonSerializerContext) as global::G.CreateModerationRequestInputVariant3ItemVariant2; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationRequestInputVariant3ItemVariant2? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant2), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CreateModerationRequestInputVariant3ItemVariant2; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.g.verified.cs new file mode 100644 index 0000000000..7c47749d30 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2.g.verified.cs @@ -0,0 +1,60 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant2.g.cs + +#nullable enable + +namespace G +{ + /// + /// An object describing text to classify. + /// + public sealed partial class CreateModerationRequestInputVariant3ItemVariant2 + { + /// + /// Always `text`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemVariant2TypeJsonConverter))] + public global::G.CreateModerationRequestInputVariant3ItemVariant2Type Type { get; set; } + + /// + /// A string of text to classify.
+ /// Example: I want to kill them + ///
+ /// I want to kill them + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Text { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Always `text`. + /// + /// + /// A string of text to classify.
+ /// Example: I want to kill them + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CreateModerationRequestInputVariant3ItemVariant2( + string text, + global::G.CreateModerationRequestInputVariant3ItemVariant2Type type) + { + this.Text = text ?? throw new global::System.ArgumentNullException(nameof(text)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public CreateModerationRequestInputVariant3ItemVariant2() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs new file mode 100644 index 0000000000..e68a1d6c8c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationRequestInputVariant3ItemVariant2Type.g.cs + +#nullable enable + +namespace G +{ + /// + /// Always `text`. + /// + public enum CreateModerationRequestInputVariant3ItemVariant2Type + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationRequestInputVariant3ItemVariant2TypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationRequestInputVariant3ItemVariant2Type value) + { + return value switch + { + CreateModerationRequestInputVariant3ItemVariant2Type.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationRequestInputVariant3ItemVariant2Type? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationRequestInputVariant3ItemVariant2Type.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestModel.g.verified.cs index f537b4c1ec..d873e9d60d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationRequestModel.g.verified.cs @@ -9,6 +9,14 @@ namespace G ///
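A minimal construction sketch for the two new moderation input parts generated above. The two constructors and the enum member are taken from this snapshot; how the parts are attached to the request's input array is not shown in this part of the diff and is left out.

// Image part: per the Url doc above, the value may be an https URL or base64-encoded image data.
var imagePart = new G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl(
    url: "https://example.com/image.jpg");

// Text part: uses the (text, type) constructor generated for this class.
var textPart = new G.CreateModerationRequestInputVariant3ItemVariant2(
    text: "I want to kill them",
    type: G.CreateModerationRequestInputVariant3ItemVariant2Type.Text);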
public enum CreateModerationRequestModel { + /// + /// + /// + OmniModerationLatest, + /// + /// + /// + OmniModeration20240926, /// /// /// @@ -31,6 +39,8 @@ public static string ToValueString(this CreateModerationRequestModel value) { return value switch { + CreateModerationRequestModel.OmniModerationLatest => "omni-moderation-latest", + CreateModerationRequestModel.OmniModeration20240926 => "omni-moderation-2024-09-26", CreateModerationRequestModel.TextModerationLatest => "text-moderation-latest", CreateModerationRequestModel.TextModerationStable => "text-moderation-stable", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), @@ -43,6 +53,8 @@ public static string ToValueString(this CreateModerationRequestModel value) { return value switch { + "omni-moderation-latest" => CreateModerationRequestModel.OmniModerationLatest, + "omni-moderation-2024-09-26" => CreateModerationRequestModel.OmniModeration20240926, "text-moderation-latest" => CreateModerationRequestModel.TextModerationLatest, "text-moderation-stable" => CreateModerationRequestModel.TextModerationStable, _ => null, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResult.g.verified.cs index 57cd960b8e..37ebfd61ab 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResult.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResult.g.verified.cs @@ -30,6 +30,13 @@ public sealed partial class CreateModerationResponseResult [global::System.Text.Json.Serialization.JsonRequired] public required global::G.CreateModerationResponseResultCategoryScores CategoryScores { get; set; } + /// + /// A list of the categories along with the input type(s) that the score applies to. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("category_applied_input_types")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.CreateModerationResponseResultCategoryAppliedInputTypes CategoryAppliedInputTypes { get; set; } + /// /// Additional properties that are not explicitly defined in the schema /// @@ -48,15 +55,20 @@ public sealed partial class CreateModerationResponseResult /// /// A list of the categories along with their scores as predicted by model. /// + /// + /// A list of the categories along with the input type(s) that the score applies to. + /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateModerationResponseResult( bool flagged, global::G.CreateModerationResponseResultCategories categories, - global::G.CreateModerationResponseResultCategoryScores categoryScores) + global::G.CreateModerationResponseResultCategoryScores categoryScores, + global::G.CreateModerationResponseResultCategoryAppliedInputTypes categoryAppliedInputTypes) { this.Flagged = flagged; this.Categories = categories ?? throw new global::System.ArgumentNullException(nameof(categories)); this.CategoryScores = categoryScores ?? throw new global::System.ArgumentNullException(nameof(categoryScores)); + this.CategoryAppliedInputTypes = categoryAppliedInputTypes ?? 
throw new global::System.ArgumentNullException(nameof(categoryAppliedInputTypes)); } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategories.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategories.g.verified.cs index 82f3611e86..bc640f082b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategories.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategories.g.verified.cs @@ -37,6 +37,20 @@ public sealed partial class CreateModerationResponseResultCategories [global::System.Text.Json.Serialization.JsonRequired] public required bool HarassmentThreatening { get; set; } + /// + /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("illicit")] + [global::System.Text.Json.Serialization.JsonRequired] + public required bool Illicit { get; set; } + + /// + /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("illicit/violent")] + [global::System.Text.Json.Serialization.JsonRequired] + public required bool IllicitViolent { get; set; } + /// /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. /// @@ -107,6 +121,12 @@ public sealed partial class CreateModerationResponseResultCategories /// /// Harassment content that also includes violence or serious harm towards any target. /// + /// + /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category. + /// + /// + /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon. + /// /// /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. 
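A short, hedged sketch of reading the expanded result surface above; `result` stands for an already-deserialized CreateModerationResponseResult (the client call itself is outside this snapshot), and only members shown in this diff are touched.

static void InspectResult(G.CreateModerationResponseResult result)
{
    if (result.Flagged)
    {
        // New boolean categories added in this diff.
        bool illicit = result.Categories.Illicit;
        bool illicitViolent = result.Categories.IllicitViolent;

        // New required property: which input type(s) each category score applied to.
        G.CreateModerationResponseResultCategoryAppliedInputTypes appliedTo = result.CategoryAppliedInputTypes;

        System.Console.WriteLine($"illicit={illicit}, illicit/violent={illicitViolent}");
    }
}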
/// @@ -134,6 +154,8 @@ public CreateModerationResponseResultCategories( bool hateThreatening, bool harassment, bool harassmentThreatening, + bool illicit, + bool illicitViolent, bool selfHarm, bool selfHarmIntent, bool selfHarmInstructions, @@ -146,6 +168,8 @@ public CreateModerationResponseResultCategories( this.HateThreatening = hateThreatening; this.Harassment = harassment; this.HarassmentThreatening = harassmentThreatening; + this.Illicit = illicit; + this.IllicitViolent = illicitViolent; this.SelfHarm = selfHarm; this.SelfHarmIntent = selfHarmIntent; this.SelfHarmInstructions = selfHarmInstructions; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.Json.g.verified.cs new file mode 100644 index 0000000000..977558ae11 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class CreateModerationResponseResultCategoryAppliedInputTypes + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.CreateModerationResponseResultCategoryAppliedInputTypes? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.CreateModerationResponseResultCategoryAppliedInputTypes), + jsonSerializerContext) as global::G.CreateModerationResponseResultCategoryAppliedInputTypes; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.CreateModerationResponseResultCategoryAppliedInputTypes? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.CreateModerationResponseResultCategoryAppliedInputTypes), + jsonSerializerContext).ConfigureAwait(false)) as global::G.CreateModerationResponseResultCategoryAppliedInputTypes; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.g.verified.cs new file mode 100644 index 0000000000..65c9f8a4d0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.g.verified.cs @@ -0,0 +1,189 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypes.g.cs + +#nullable enable + +namespace G +{ + /// + /// A list of the categories along with the input type(s) that the score applies to. + /// + public sealed partial class CreateModerationResponseResultCategoryAppliedInputTypes + { + /// + /// The applied input type(s) for the category 'hate'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("hate")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList Hate { get; set; } + + /// + /// The applied input type(s) for the category 'hate/threatening'. 
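The Json partial above follows the same pattern as the other generated models: a reflection-based overload plus a JsonSerializerContext overload intended for trimming and native AOT. A minimal round-trip sketch; `G.SourceGenerationContext` is an assumed name for the source-generated context and does not appear in this section.

static void RoundTrip(G.CreateModerationResponseResultCategoryAppliedInputTypes appliedInputTypes)
{
    // Reflection-based overloads (these carry the RequiresUnreferencedCode/RequiresDynamicCode attributes above).
    string json = appliedInputTypes.ToJson();
    var parsed = G.CreateModerationResponseResultCategoryAppliedInputTypes.FromJson(json);

    // Context-based overloads are the AOT/trimming-safe path; the context type name below is an
    // assumption and may differ in the real generated SDK:
    // string aotJson = appliedInputTypes.ToJson(G.SourceGenerationContext.Default);
    // var aotParsed = G.CreateModerationResponseResultCategoryAppliedInputTypes.FromJson(aotJson, G.SourceGenerationContext.Default);
}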
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("hate/threatening")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList HateThreatening { get; set; } + + /// + /// The applied input type(s) for the category 'harassment'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("harassment")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList Harassment { get; set; } + + /// + /// The applied input type(s) for the category 'harassment/threatening'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("harassment/threatening")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList HarassmentThreatening { get; set; } + + /// + /// The applied input type(s) for the category 'illicit'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("illicit")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList Illicit { get; set; } + + /// + /// The applied input type(s) for the category 'illicit/violent'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("illicit/violent")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList IllicitViolent { get; set; } + + /// + /// The applied input type(s) for the category 'self-harm'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("self-harm")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList SelfHarm { get; set; } + + /// + /// The applied input type(s) for the category 'self-harm/intent'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("self-harm/intent")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList SelfHarmIntent { get; set; } + + /// + /// The applied input type(s) for the category 'self-harm/instructions'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("self-harm/instructions")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList SelfHarmInstructions { get; set; } + + /// + /// The applied input type(s) for the category 'sexual'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("sexual")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList Sexual { get; set; } + + /// + /// The applied input type(s) for the category 'sexual/minors'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("sexual/minors")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList SexualMinors { get; set; } + + /// + /// The applied input type(s) for the category 'violence'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("violence")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList Violence { get; set; } + + /// + /// The applied input type(s) for the category 'violence/graphic'. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("violence/graphic")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList ViolenceGraphic { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The applied input type(s) for the category 'hate'. + /// + /// + /// The applied input type(s) for the category 'hate/threatening'. + /// + /// + /// The applied input type(s) for the category 'harassment'. + /// + /// + /// The applied input type(s) for the category 'harassment/threatening'. + /// + /// + /// The applied input type(s) for the category 'illicit'. + /// + /// + /// The applied input type(s) for the category 'illicit/violent'. + /// + /// + /// The applied input type(s) for the category 'self-harm'. + /// + /// + /// The applied input type(s) for the category 'self-harm/intent'. + /// + /// + /// The applied input type(s) for the category 'self-harm/instructions'. + /// + /// + /// The applied input type(s) for the category 'sexual'. + /// + /// + /// The applied input type(s) for the category 'sexual/minors'. + /// + /// + /// The applied input type(s) for the category 'violence'. + /// + /// + /// The applied input type(s) for the category 'violence/graphic'. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public CreateModerationResponseResultCategoryAppliedInputTypes( + global::System.Collections.Generic.IList hate, + global::System.Collections.Generic.IList hateThreatening, + global::System.Collections.Generic.IList harassment, + global::System.Collections.Generic.IList harassmentThreatening, + global::System.Collections.Generic.IList illicit, + global::System.Collections.Generic.IList illicitViolent, + global::System.Collections.Generic.IList selfHarm, + global::System.Collections.Generic.IList selfHarmIntent, + global::System.Collections.Generic.IList selfHarmInstructions, + global::System.Collections.Generic.IList sexual, + global::System.Collections.Generic.IList sexualMinors, + global::System.Collections.Generic.IList violence, + global::System.Collections.Generic.IList violenceGraphic) + { + this.Hate = hate ?? throw new global::System.ArgumentNullException(nameof(hate)); + this.HateThreatening = hateThreatening ?? throw new global::System.ArgumentNullException(nameof(hateThreatening)); + this.Harassment = harassment ?? throw new global::System.ArgumentNullException(nameof(harassment)); + this.HarassmentThreatening = harassmentThreatening ?? throw new global::System.ArgumentNullException(nameof(harassmentThreatening)); + this.Illicit = illicit ?? throw new global::System.ArgumentNullException(nameof(illicit)); + this.IllicitViolent = illicitViolent ?? throw new global::System.ArgumentNullException(nameof(illicitViolent)); + this.SelfHarm = selfHarm ?? throw new global::System.ArgumentNullException(nameof(selfHarm)); + this.SelfHarmIntent = selfHarmIntent ?? throw new global::System.ArgumentNullException(nameof(selfHarmIntent)); + this.SelfHarmInstructions = selfHarmInstructions ?? throw new global::System.ArgumentNullException(nameof(selfHarmInstructions)); + this.Sexual = sexual ?? 
throw new global::System.ArgumentNullException(nameof(sexual)); + this.SexualMinors = sexualMinors ?? throw new global::System.ArgumentNullException(nameof(sexualMinors)); + this.Violence = violence ?? throw new global::System.ArgumentNullException(nameof(violence)); + this.ViolenceGraphic = violenceGraphic ?? throw new global::System.ArgumentNullException(nameof(violenceGraphic)); + } + + /// + /// Initializes a new instance of the class. + /// + public CreateModerationResponseResultCategoryAppliedInputTypes() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs new file mode 100644 index 0000000000..8f2a6c6e0a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs new file mode 100644 index 0000000000..209115cb3e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemExtensions + { + /// + /// Converts an enum to a string. 
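A sketch of reading one of the per-category lists above and turning its item enums back into their wire strings with the generated extensions; only members shown in this snapshot are used.

static void PrintHarassmentInputTypes(G.CreateModerationResponseResultCategoryAppliedInputTypes appliedTo)
{
    // Each list item is a small generated enum; ToValueString is the generated extension method.
    foreach (var item in appliedTo.Harassment)
    {
        // The only generated member for this category is Text, so this prints "text".
        System.Console.WriteLine(item.ToValueString());
    }
}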
+ /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs new file mode 100644 index 0000000000..2849df9fd0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesHateItem + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesHateItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesHateItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesHateItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesHateItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesHateItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs new file mode 100644 index 0000000000..6c8825b61d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs new file mode 100644 index 0000000000..d8141c1bcc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs new file mode 100644 index 0000000000..45d4c9a797 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs new file mode 100644 index 0000000000..229c0643fe --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction + { + /// + /// + /// + Text, + /// + /// + /// + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? 
ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs new file mode 100644 index 0000000000..bfc06774b8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem + { + /// + /// + /// + Text, + /// + /// + /// + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs new file mode 100644 index 0000000000..3bd322387b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem + { + /// + /// + /// + Text, + /// + /// + /// + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemExtensions + { + /// + /// Converts an enum to a string. 
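The string-to-enum direction on these item enums returns null for unrecognised values rather than throwing. A small sketch using the SelfHarmIntentItem extensions shown above; note that ToEnum is a plain static method on the generated *Extensions class, not an extension method.

G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? image =
    G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemExtensions.ToEnum("image");  // Image

G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? unknown =
    G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemExtensions.ToEnum("audio");  // null: not a known value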
+ /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs new file mode 100644 index 0000000000..bc8096bd79 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesSexualItem + { + /// + /// + /// + Text, + /// + /// + /// + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSexualItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSexualItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? 
ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs new file mode 100644 index 0000000000..1c74782034 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs new file mode 100644 index 0000000000..459821543c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem + { + /// + /// + /// + Text, + /// + /// + /// + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs new file mode 100644 index 0000000000..667c4cfd01 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem + { + /// + /// + /// + Text, + /// + /// + /// + Image, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem value) + { + return value switch + { + CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Text => "text", + CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Image => "image", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? 
ToEnum(string value) + { + return value switch + { + "text" => CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Text, + "image" => CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.Image, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryScores.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryScores.g.verified.cs index bc34da245d..32fab67053 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryScores.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateModerationResponseResultCategoryScores.g.verified.cs @@ -37,6 +37,20 @@ public sealed partial class CreateModerationResponseResultCategoryScores [global::System.Text.Json.Serialization.JsonRequired] public required double HarassmentThreatening { get; set; } + /// + /// The score for the category 'illicit'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("illicit")] + [global::System.Text.Json.Serialization.JsonRequired] + public required double Illicit { get; set; } + + /// + /// The score for the category 'illicit/violent'. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("illicit/violent")] + [global::System.Text.Json.Serialization.JsonRequired] + public required double IllicitViolent { get; set; } + /// /// The score for the category 'self-harm'. /// @@ -107,6 +121,12 @@ public sealed partial class CreateModerationResponseResultCategoryScores /// /// The score for the category 'harassment/threatening'. /// + /// + /// The score for the category 'illicit'. + /// + /// + /// The score for the category 'illicit/violent'. + /// /// /// The score for the category 'self-harm'. /// @@ -134,6 +154,8 @@ public CreateModerationResponseResultCategoryScores( double hateThreatening, double harassment, double harassmentThreatening, + double illicit, + double illicitViolent, double selfHarm, double selfHarmIntent, double selfHarmInstructions, @@ -146,6 +168,8 @@ public CreateModerationResponseResultCategoryScores( this.HateThreatening = hateThreatening; this.Harassment = harassment; this.HarassmentThreatening = harassmentThreatening; + this.Illicit = illicit; + this.IllicitViolent = illicitViolent; this.SelfHarm = selfHarm; this.SelfHarmIntent = selfHarmIntent; this.SelfHarmInstructions = selfHarmInstructions; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunIncludeItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunIncludeItem.g.verified.cs new file mode 100644 index 0000000000..965b2b8e94 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunIncludeItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.CreateRunIncludeItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum CreateRunIncludeItem + { + /// + /// + /// + StepDetailsToolCallsAnyFileSearchResultsAnyContent, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class CreateRunIncludeItemExtensions + { + /// + /// Converts an enum to a string. 
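A hedged sketch of consuming the two new score fields added to CreateModerationResponseResultCategoryScores above; the 0.5 threshold is purely illustrative and not part of the API.

static bool LooksIllicit(G.CreateModerationResponseResultCategoryScores scores, double threshold = 0.5)
{
    // Illicit and IllicitViolent are the new double-valued scores from this diff.
    return scores.Illicit >= threshold || scores.IllicitViolent >= threshold;
}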
+ /// + public static string ToValueString(this CreateRunIncludeItem value) + { + return value switch + { + CreateRunIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent => "step_details.tool_calls[*].file_search.results[*].content", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static CreateRunIncludeItem? ToEnum(string value) + { + return value switch + { + "step_details.tool_calls[*].file_search.results[*].content" => CreateRunIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequest.g.verified.cs index 797646e43a..e35b19a006 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequest.g.verified.cs @@ -49,16 +49,16 @@ public sealed partial class CreateRunRequest /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. /// [global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } /// - /// empty
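The new run-creation include value maps to a fairly long wire string; the generated extensions shown above convert in both directions. A minimal sketch:

// Enum -> wire value.
string include = G.CreateRunIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent.ToValueString();
// include == "step_details.tool_calls[*].file_search.results[*].content"

// Wire value -> enum (null if the string is not recognised).
G.CreateRunIncludeItem? parsed = G.CreateRunIncludeItemExtensions.ToEnum(include);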
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -67,7 +67,8 @@ public sealed partial class CreateRunRequest public double? Temperature { get; set; } /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
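// --- editorial note: illustrative sketch, not part of the generated snapshot ---
// The two hunks above replace the old "empty" placeholders on Temperature and TopP with the
// spec's real descriptions. Assuming the snapshot's `G` namespace and the request's required
// AssistantId member (defined outside this hunk), both knobs are plain nullable doubles on the
// regenerated model:
var sketchRunRequest = new G.CreateRunRequest
{
    AssistantId = "asst_abc123", // hypothetical ID; the Assistants API schema requires assistant_id
    Temperature = 0.2,           // 0..2; lower values give more focused output (documented default: 1)
    // TopP = 0.1,               // nucleus-sampling alternative; the docs recommend altering one or the other
};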
@@ -111,15 +112,16 @@ public sealed partial class CreateRunRequest public global::G.AssistantsApiToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true ///
[global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
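// --- editorial note: illustrative sketch, not part of the generated snapshot ---
// The summary above describes the two structured-output modes for response_format. The exact C#
// wrapper type for this property lives outside this hunk, so this only spells out the JSON payload
// shapes the doc text refers to (the schema body is a made-up example):
var jsonObjectMode = """{ "type": "json_object" }""";
var jsonSchemaMode = """
    {
      "type": "json_schema",
      "json_schema": {
        "name": "run_summary",
        "schema": { "type": "object", "properties": { "answer": { "type": "string" } } }
      }
    }
    """;
// Reminder from the doc text: with { "type": "json_object" } the prompt itself must still ask for JSON.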
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] @@ -155,15 +157,16 @@ public sealed partial class CreateRunRequest /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -187,12 +190,13 @@ public sealed partial class CreateRunRequest /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] @@ -202,7 +206,7 @@ public CreateRunRequest( string? instructions, string? additionalInstructions, global::System.Collections.Generic.IList? additionalMessages, - global::System.Collections.Generic.IList? tools, + global::System.Collections.Generic.IList? tools, object? metadata, double? temperature, double? topP, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequestMetadata.g.verified.cs index 733563071c..d55c8e2a1a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class CreateRunRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequestModel.g.verified.cs index d8e63fae67..0d5efc10ea 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateRunRequestModel.g.verified.cs @@ -16,6 +16,10 @@ public enum CreateRunRequestModel /// /// /// + Gpt4o20241120, + /// + /// + /// Gpt4o20240806, /// /// @@ -116,6 +120,7 @@ public static string ToValueString(this CreateRunRequestModel value) return value switch { CreateRunRequestModel.Gpt4o => "gpt-4o", + CreateRunRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateRunRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateRunRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", CreateRunRequestModel.Gpt4oMini => "gpt-4o-mini", @@ -149,6 +154,7 @@ public static string ToValueString(this CreateRunRequestModel value) return value switch { "gpt-4o" => CreateRunRequestModel.Gpt4o, + "gpt-4o-2024-11-20" => CreateRunRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateRunRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateRunRequestModel.Gpt4o20240513, "gpt-4o-mini" => CreateRunRequestModel.Gpt4oMini, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateSpeechRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateSpeechRequest.g.verified.cs index 5b96df06ff..14ee3fe7be 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateSpeechRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateSpeechRequest.g.verified.cs @@ -12,7 +12,7 @@ namespace G public sealed partial class CreateSpeechRequest { /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// [global::System.Text.Json.Serialization.JsonPropertyName("model")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.AnyOfJsonConverter))] @@ -27,7 +27,7 @@ public sealed partial class CreateSpeechRequest public required string Input { get; set; } /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// [global::System.Text.Json.Serialization.JsonPropertyName("voice")] [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CreateSpeechRequestVoiceJsonConverter))] @@ -59,13 +59,13 @@ public sealed partial class CreateSpeechRequest /// Initializes a new instance of the class. /// /// - /// One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd` + /// One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd` /// /// /// The text to generate audio for. The maximum length is 4096 characters. 
/// /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// /// /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
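// --- editorial note: illustrative sketch, not part of the generated snapshot ---
// The CreateSpeechRequest hunks above only retarget doc links (/docs/models#tts,
// /docs/guides/text-to-speech#voice-options), but for orientation this is roughly how the
// regenerated model is filled in. It assumes the snapshot's `G` namespace, the AnyOf wrapper's
// implicit conversion from string, and conventional enum member names:
var speechRequest = new G.CreateSpeechRequest
{
    Model = "tts-1",                           // or "tts-1-hd"
    Input = "Hello from the regenerated SDK!", // up to 4096 characters
    Voice = G.CreateSpeechRequestVoice.Alloy,  // alloy, echo, fable, onyx, nova or shimmer
    // ResponseFormat and Speed keep their documented defaults (mp3, 1.0) when left unset.
};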
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateSpeechRequestVoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateSpeechRequestVoice.g.verified.cs index ddc4325fc4..6338233cac 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateSpeechRequestVoice.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateSpeechRequestVoice.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). + /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). /// public enum CreateSpeechRequestVoice { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequest.g.verified.cs index 01bcc11dea..b4005a9a69 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequest.g.verified.cs @@ -43,7 +43,7 @@ public sealed partial class CreateThreadAndRunRequest /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. ///
[global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @@ -52,13 +52,13 @@ public sealed partial class CreateThreadAndRunRequest public global::G.CreateThreadAndRunRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 ///
@@ -67,7 +67,8 @@ public sealed partial class CreateThreadAndRunRequest public double? Temperature { get; set; } /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 ///
@@ -111,15 +112,16 @@ public sealed partial class CreateThreadAndRunRequest public global::G.AssistantsApiToolChoiceOption? ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true ///
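// --- editorial note: illustrative sketch, not part of the generated snapshot ---
// parallel_tool_calls now documents its default of true, so the nullable property only needs to be
// touched when opting out. Assuming the snapshot's `G` namespace and the request's required
// AssistantId member (defined outside this hunk):
var threadAndRunRequest = new G.CreateThreadAndRunRequest
{
    AssistantId = "asst_abc123", // hypothetical ID; required by the schema
    ParallelToolCalls = false,   // leave unset to keep the documented default of true
};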
[global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] @@ -153,15 +155,16 @@ public sealed partial class CreateThreadAndRunRequest /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// @@ -185,12 +188,13 @@ public sealed partial class CreateThreadAndRunRequest /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] @@ -199,7 +203,7 @@ public CreateThreadAndRunRequest( global::G.CreateThreadRequest? thread, global::G.AnyOf? model, string? instructions, - global::System.Collections.Generic.IList? tools, + global::System.Collections.Generic.IList? tools, global::G.CreateThreadAndRunRequestToolResources? toolResources, object? metadata, double? temperature, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestMetadata.g.verified.cs index dcb178e30f..a5253553f9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class CreateThreadAndRunRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestModel.g.verified.cs index b1d489f6a5..97ba730435 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestModel.g.verified.cs @@ -16,6 +16,10 @@ public enum CreateThreadAndRunRequestModel /// /// /// + Gpt4o20241120, + /// + /// + /// Gpt4o20240806, /// /// @@ -116,6 +120,7 @@ public static string ToValueString(this CreateThreadAndRunRequestModel value) return value switch { CreateThreadAndRunRequestModel.Gpt4o => "gpt-4o", + CreateThreadAndRunRequestModel.Gpt4o20241120 => "gpt-4o-2024-11-20", CreateThreadAndRunRequestModel.Gpt4o20240806 => "gpt-4o-2024-08-06", CreateThreadAndRunRequestModel.Gpt4o20240513 => "gpt-4o-2024-05-13", CreateThreadAndRunRequestModel.Gpt4oMini => "gpt-4o-mini", @@ -149,6 +154,7 @@ public static string ToValueString(this CreateThreadAndRunRequestModel value) return value switch { "gpt-4o" => CreateThreadAndRunRequestModel.Gpt4o, + "gpt-4o-2024-11-20" => CreateThreadAndRunRequestModel.Gpt4o20241120, "gpt-4o-2024-08-06" => CreateThreadAndRunRequestModel.Gpt4o20240806, "gpt-4o-2024-05-13" => CreateThreadAndRunRequestModel.Gpt4o20240513, "gpt-4o-mini" => CreateThreadAndRunRequestModel.Gpt4oMini, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestToolResourcesCodeInterpreter.g.verified.cs index 870ae7d3f7..dcd4e62978 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadAndRunRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class CreateThreadAndRunRequestToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::System.Text.Json.Serialization.JsonPropertyName("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class CreateThreadAndRunRequestToolResourcesCodeInterprete /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateThreadAndRunRequestToolResourcesCodeInterpreter( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequest.g.verified.cs index 6c7592c5dd..7e83d0e552 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequest.g.verified.cs @@ -22,7 +22,7 @@ public sealed partial class CreateThreadRequest public global::G.CreateThreadRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -43,7 +43,7 @@ public sealed partial class CreateThreadRequest /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateThreadRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestMetadata.g.verified.cs index 07d6b80e74..806386c538 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class CreateThreadRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesCodeInterpreter.g.verified.cs index 1f566d440a..2e31e28b96 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class CreateThreadRequestToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::System.Text.Json.Serialization.JsonPropertyName("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class CreateThreadRequestToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateThreadRequestToolResourcesCodeInterpreter( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStore.g.verified.cs index 526ca7675e..6d5c0d4bcf 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStore.g.verified.cs @@ -23,7 +23,7 @@ public sealed partial class CreateThreadRequestToolResourcesFileSearchVectorStor public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy? ChunkingStrategy { get; set; } /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -44,7 +44,7 @@ public sealed partial class CreateThreadRequestToolResourcesFileSearchVectorStor /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. /// /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateThreadRequestToolResourcesFileSearchVectorStore( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs index 41945dd7b3..360c384c3d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ /// Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranscriptionRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranscriptionRequest.g.verified.cs index 0248285722..ffc7a311a2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranscriptionRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranscriptionRequest.g.verified.cs @@ -42,18 +42,18 @@ public sealed partial class CreateTranscriptionRequest public string? Language { get; set; } /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// [global::System.Text.Json.Serialization.JsonPropertyName("prompt")] public string? Prompt { get; set; } /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json ///
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.CreateTranscriptionRequestResponseFormatJsonConverter))] - public global::G.CreateTranscriptionRequestResponseFormat? ResponseFormat { get; set; } + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.AudioResponseFormatJsonConverter))] + public global::G.AudioResponseFormat? ResponseFormat { get; set; } /// /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
@@ -92,10 +92,10 @@ public sealed partial class CreateTranscriptionRequest /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -113,7 +113,7 @@ public CreateTranscriptionRequest( global::G.AnyOf model, string? language, string? prompt, - global::G.CreateTranscriptionRequestResponseFormat? responseFormat, + global::G.AudioResponseFormat? responseFormat, double? temperature, global::System.Collections.Generic.IList? timestampGranularities) { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranslationRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranslationRequest.g.verified.cs index 0e1cf94390..e2108e0c07 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranslationRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateTranslationRequest.g.verified.cs @@ -36,17 +36,18 @@ public sealed partial class CreateTranslationRequest public required global::G.AnyOf Model { get; set; } /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// [global::System.Text.Json.Serialization.JsonPropertyName("prompt")] public string? Prompt { get; set; } /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json ///
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] - public string? ResponseFormat { get; set; } + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.AudioResponseFormatJsonConverter))] + public global::G.AudioResponseFormat? ResponseFormat { get; set; } /// /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
@@ -75,10 +76,10 @@ public sealed partial class CreateTranslationRequest /// Example: whisper-1 /// /// - /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + /// An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. /// /// - /// The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+ /// The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
/// Default Value: json /// /// @@ -91,7 +92,7 @@ public CreateTranslationRequest( string filename, global::G.AnyOf model, string? prompt, - string? responseFormat, + global::G.AudioResponseFormat? responseFormat, double? temperature) { this.File = file ?? throw new global::System.ArgumentNullException(nameof(file)); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateVectorStoreRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateVectorStoreRequest.g.verified.cs index 18c2b973ce..dc7be90f79 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateVectorStoreRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateVectorStoreRequest.g.verified.cs @@ -35,7 +35,7 @@ public sealed partial class CreateVectorStoreRequest public global::G.CreateVectorStoreRequestChunkingStrategy? ChunkingStrategy { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -62,7 +62,7 @@ public sealed partial class CreateVectorStoreRequest /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public CreateVectorStoreRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateVectorStoreRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateVectorStoreRequestMetadata.g.verified.cs index 78464a3a59..0b9baf4160 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateVectorStoreRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.CreateVectorStoreRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class CreateVectorStoreRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ErrorEvent.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ErrorEvent.g.verified.cs index f9cf42a7e6..ca271b3f8f 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ErrorEvent.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ErrorEvent.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + /// Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout. /// public sealed partial class ErrorEvent { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptions.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptions.Json.g.verified.cs new file mode 100644 index 0000000000..7c67bc83ca --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptions.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.FileSearchRankingOptions.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class FileSearchRankingOptions + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.FileSearchRankingOptions? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.FileSearchRankingOptions), + jsonSerializerContext) as global::G.FileSearchRankingOptions; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.FileSearchRankingOptions? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.FileSearchRankingOptions), + jsonSerializerContext).ConfigureAwait(false)) as global::G.FileSearchRankingOptions; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptions.g.verified.cs new file mode 100644 index 0000000000..5c3ae22a83 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptions.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.FileSearchRankingOptions.g.cs + +#nullable enable + +namespace G +{ + /// + /// The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.
+ /// See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + ///
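// --- editorial note: illustrative sketch, not part of the generated snapshot ---
// The new FileSearchRankingOptions model declared below carries the ranker and score threshold
// described in this summary. Constructed directly (member names taken from the snapshot, the
// threshold value is purely illustrative):
var rankingOptions = new G.FileSearchRankingOptions
{
    ScoreThreshold = 0.5,                           // required; must be between 0 and 1
    Ranker = G.FileSearchRankingOptionsRanker.Auto, // optional; omitting it falls back to `auto`
};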
+ public sealed partial class FileSearchRankingOptions + { + /// + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("ranker")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.FileSearchRankingOptionsRankerJsonConverter))] + public global::G.FileSearchRankingOptionsRanker? Ranker { get; set; } + + /// + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("score_threshold")] + [global::System.Text.Json.Serialization.JsonRequired] + public required double ScoreThreshold { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + /// + /// + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public FileSearchRankingOptions( + double scoreThreshold, + global::G.FileSearchRankingOptionsRanker? ranker) + { + this.ScoreThreshold = scoreThreshold; + this.Ranker = ranker; + } + + /// + /// Initializes a new instance of the class. + /// + public FileSearchRankingOptions() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptionsRanker.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptionsRanker.g.verified.cs new file mode 100644 index 0000000000..5c55286302 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FileSearchRankingOptionsRanker.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.FileSearchRankingOptionsRanker.g.cs + +#nullable enable + +namespace G +{ + /// + /// The ranker to use for the file search. If not specified will use the `auto` ranker. + /// + public enum FileSearchRankingOptionsRanker + { + /// + /// + /// + Auto, + /// + /// + /// + Default20240821, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class FileSearchRankingOptionsRankerExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this FileSearchRankingOptionsRanker value) + { + return value switch + { + FileSearchRankingOptionsRanker.Auto => "auto", + FileSearchRankingOptionsRanker.Default20240821 => "default_2024_08_21", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static FileSearchRankingOptionsRanker? 
ToEnum(string value) + { + return value switch + { + "auto" => FileSearchRankingOptionsRanker.Auto, + "default_2024_08_21" => FileSearchRankingOptionsRanker.Default20240821, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FinetuneChatRequestInput.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FinetuneChatRequestInput.g.verified.cs index aec17d7707..77d03dcc10 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FinetuneChatRequestInput.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.FinetuneChatRequestInput.g.verified.cs @@ -24,7 +24,8 @@ public sealed partial class FinetuneChatRequestInput public global::System.Collections.Generic.IList? Tools { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true ///
[global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] public bool? ParallelToolCalls { get; set; } @@ -50,7 +51,8 @@ public sealed partial class FinetuneChatRequestInput /// A list of tools the model may generate JSON inputs for. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public FinetuneChatRequestInput( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.GetRunStepIncludeItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.GetRunStepIncludeItem.g.verified.cs new file mode 100644 index 0000000000..cad9820f76 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.GetRunStepIncludeItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.GetRunStepIncludeItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum GetRunStepIncludeItem + { + /// + /// + /// + StepDetailsToolCallsAnyFileSearchResultsAnyContent, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class GetRunStepIncludeItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this GetRunStepIncludeItem value) + { + return value switch + { + GetRunStepIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent => "step_details.tool_calls[*].file_search.results[*].content", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static GetRunStepIncludeItem? ToEnum(string value) + { + return value switch + { + "step_details.tool_calls[*].file_search.results[*].content" => GetRunStepIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.InputVariant3Item.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.InputVariant3Item.Json.g.verified.cs new file mode 100644 index 0000000000..813f7480e5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.InputVariant3Item.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.InputVariant3Item.Json.g.cs +#nullable enable + +namespace G +{ + public readonly partial struct InputVariant3Item + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::G.InputVariant3Item? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.InputVariant3Item), + jsonSerializerContext) as global::G.InputVariant3Item?; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.InputVariant3Item? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.InputVariant3Item), + jsonSerializerContext).ConfigureAwait(false)) as global::G.InputVariant3Item?; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.InputVariant3Item.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.InputVariant3Item.g.verified.cs new file mode 100644 index 0000000000..f4880ac123 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.InputVariant3Item.g.verified.cs @@ -0,0 +1,223 @@ +//HintName: G.Models.InputVariant3Item.g.cs +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace G +{ + /// + /// + /// + public readonly partial struct InputVariant3Item : global::System.IEquatable + { + /// + /// + /// + public global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? 
Type { get; } + + /// + /// An object describing an image to classify. + /// +#if NET6_0_OR_GREATER + public global::G.CreateModerationRequestInputVariant3ItemVariant1? ImageUrl { get; init; } +#else + public global::G.CreateModerationRequestInputVariant3ItemVariant1? ImageUrl { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(ImageUrl))] +#endif + public bool IsImageUrl => ImageUrl != null; + + /// + /// + /// + public static implicit operator InputVariant3Item(global::G.CreateModerationRequestInputVariant3ItemVariant1 value) => new InputVariant3Item(value); + + /// + /// + /// + public static implicit operator global::G.CreateModerationRequestInputVariant3ItemVariant1?(InputVariant3Item @this) => @this.ImageUrl; + + /// + /// + /// + public InputVariant3Item(global::G.CreateModerationRequestInputVariant3ItemVariant1? value) + { + ImageUrl = value; + } + + /// + /// An object describing text to classify. + /// +#if NET6_0_OR_GREATER + public global::G.CreateModerationRequestInputVariant3ItemVariant2? Text { get; init; } +#else + public global::G.CreateModerationRequestInputVariant3ItemVariant2? Text { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Text))] +#endif + public bool IsText => Text != null; + + /// + /// + /// + public static implicit operator InputVariant3Item(global::G.CreateModerationRequestInputVariant3ItemVariant2 value) => new InputVariant3Item(value); + + /// + /// + /// + public static implicit operator global::G.CreateModerationRequestInputVariant3ItemVariant2?(InputVariant3Item @this) => @this.Text; + + /// + /// + /// + public InputVariant3Item(global::G.CreateModerationRequestInputVariant3ItemVariant2? value) + { + Text = value; + } + + /// + /// + /// + public InputVariant3Item( + global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? type, + global::G.CreateModerationRequestInputVariant3ItemVariant1? imageUrl, + global::G.CreateModerationRequestInputVariant3ItemVariant2? text + ) + { + Type = type; + + ImageUrl = imageUrl; + Text = text; + } + + /// + /// + /// + public object? Object => + Text as object ?? + ImageUrl as object + ; + + /// + /// + /// + public bool Validate() + { + return IsImageUrl && !IsText || !IsImageUrl && IsText; + } + + /// + /// + /// + public TResult? Match( + global::System.Func? imageUrl = null, + global::System.Func? text = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsImageUrl && imageUrl != null) + { + return imageUrl(ImageUrl!); + } + else if (IsText && text != null) + { + return text(Text!); + } + + return default(TResult); + } + + /// + /// + /// + public void Match( + global::System.Action? imageUrl = null, + global::System.Action? text = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsImageUrl) + { + imageUrl?.Invoke(ImageUrl!); + } + else if (IsText) + { + text?.Invoke(Text!); + } + } + + /// + /// + /// + public override int GetHashCode() + { + var fields = new object?[] + { + ImageUrl, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1), + Text, + typeof(global::G.CreateModerationRequestInputVariant3ItemVariant2), + }; + const int offset = unchecked((int)2166136261); + const int prime = 16777619; + static int HashCodeAggregator(int hashCode, object? value) => value == null + ? 
(hashCode ^ 0) * prime + : (hashCode ^ value.GetHashCode()) * prime; + + return global::System.Linq.Enumerable.Aggregate(fields, offset, HashCodeAggregator); + } + + /// + /// + /// + public bool Equals(InputVariant3Item other) + { + return + global::System.Collections.Generic.EqualityComparer.Default.Equals(ImageUrl, other.ImageUrl) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Text, other.Text) + ; + } + + /// + /// + /// + public static bool operator ==(InputVariant3Item obj1, InputVariant3Item obj2) + { + return global::System.Collections.Generic.EqualityComparer.Default.Equals(obj1, obj2); + } + + /// + /// + /// + public static bool operator !=(InputVariant3Item obj1, InputVariant3Item obj2) + { + return !(obj1 == obj2); + } + + /// + /// + /// + public override bool Equals(object? obj) + { + return obj is InputVariant3Item o && Equals(o); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesOrder.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesOrder.g.verified.cs new file mode 100644 index 0000000000..a5aa6149dc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesOrder.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.ListFilesOrder.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: desc + /// + public enum ListFilesOrder + { + /// + /// + /// + Asc, + /// + /// + /// + Desc, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ListFilesOrderExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ListFilesOrder value) + { + return value switch + { + ListFilesOrder.Asc => "asc", + ListFilesOrder.Desc => "desc", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ListFilesOrder? ToEnum(string value) + { + return value switch + { + "asc" => ListFilesOrder.Asc, + "desc" => ListFilesOrder.Desc, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesResponse.g.verified.cs index da137156f8..8baa834fca 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesResponse.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesResponse.g.verified.cs @@ -9,6 +9,14 @@ namespace G ///
public sealed partial class ListFilesResponse { + /// + /// Example: list + /// + /// list + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Object { get; set; } + /// /// /// @@ -17,11 +25,28 @@ public sealed partial class ListFilesResponse public required global::System.Collections.Generic.IList Data { get; set; } /// - /// + /// Example: file-abc123 /// - [global::System.Text.Json.Serialization.JsonPropertyName("object")] - [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.ListFilesResponseObjectJsonConverter))] - public global::G.ListFilesResponseObject Object { get; set; } + /// file-abc123 + [global::System.Text.Json.Serialization.JsonPropertyName("first_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string FirstId { get; set; } + + /// + /// Example: file-abc456 + /// + /// file-abc456 + [global::System.Text.Json.Serialization.JsonPropertyName("last_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string LastId { get; set; } + + /// + /// Example: false + /// + /// false + [global::System.Text.Json.Serialization.JsonPropertyName("has_more")] + [global::System.Text.Json.Serialization.JsonRequired] + public required bool HasMore { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -32,15 +57,32 @@ public sealed partial class ListFilesResponse /// /// Initializes a new instance of the class. /// + /// + /// Example: list + /// /// - /// + /// + /// Example: file-abc123 + /// + /// + /// Example: file-abc456 + /// + /// + /// Example: false + /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ListFilesResponse( + string @object, global::System.Collections.Generic.IList data, - global::G.ListFilesResponseObject @object) + string firstId, + string lastId, + bool hasMore) { + this.Object = @object ?? throw new global::System.ArgumentNullException(nameof(@object)); this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); - this.Object = @object; + this.FirstId = firstId ?? throw new global::System.ArgumentNullException(nameof(firstId)); + this.LastId = lastId ?? 
throw new global::System.ArgumentNullException(nameof(lastId)); + this.HasMore = hasMore; } /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListModelsResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListModelsResponse.g.verified.cs index 2a2b6879c4..ba8b922ad2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListModelsResponse.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListModelsResponse.g.verified.cs @@ -21,7 +21,7 @@ public sealed partial class ListModelsResponse /// [global::System.Text.Json.Serialization.JsonPropertyName("data")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::System.Collections.Generic.IList Data { get; set; } + public required global::System.Collections.Generic.IList Data { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -36,7 +36,7 @@ public sealed partial class ListModelsResponse /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ListModelsResponse( - global::System.Collections.Generic.IList data, + global::System.Collections.Generic.IList data, global::G.ListModelsResponseObject @object) { this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListRunStepsIncludeItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListRunStepsIncludeItem.g.verified.cs new file mode 100644 index 0000000000..8e72e3fe40 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListRunStepsIncludeItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.ListRunStepsIncludeItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum ListRunStepsIncludeItem + { + /// + /// + /// + StepDetailsToolCallsAnyFileSearchResultsAnyContent, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ListRunStepsIncludeItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ListRunStepsIncludeItem value) + { + return value switch + { + ListRunStepsIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent => "step_details.tool_calls[*].file_search.results[*].content", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ListRunStepsIncludeItem? 
ToEnum(string value) + { + return value switch + { + "step_details.tool_calls[*].file_search.results[*].content" => ListRunStepsIncludeItem.StepDetailsToolCallsAnyFileSearchResultsAnyContent, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageDeltaObjectDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageDeltaObjectDelta.g.verified.cs index d0262747d1..a5aa1e3a2a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageDeltaObjectDelta.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageDeltaObjectDelta.g.verified.cs @@ -20,7 +20,7 @@ public sealed partial class MessageDeltaObjectDelta /// The content of the message in array of text and/or images. /// [global::System.Text.Json.Serialization.JsonPropertyName("content")] - public global::System.Collections.Generic.IList? Content { get; set; } + public global::System.Collections.Generic.IList? Content { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -40,7 +40,7 @@ public sealed partial class MessageDeltaObjectDelta [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public MessageDeltaObjectDelta( global::G.MessageDeltaObjectDeltaRole? role, - global::System.Collections.Generic.IList? content) + global::System.Collections.Generic.IList? content) { this.Role = role; this.Content = content; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObject.g.verified.cs index 44d99d890c..0284fa457a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObject.g.verified.cs @@ -82,7 +82,7 @@ public sealed partial class MessageObject /// [global::System.Text.Json.Serialization.JsonPropertyName("content")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::System.Collections.Generic.IList Content { get; set; } + public required global::System.Collections.Generic.IList Content { get; set; } /// /// If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. @@ -106,7 +106,7 @@ public sealed partial class MessageObject public required global::System.Collections.Generic.IList? Attachments { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] [global::System.Text.Json.Serialization.JsonRequired] @@ -161,7 +161,7 @@ public sealed partial class MessageObject /// A list of files attached to the message, and the tools they were added to. /// /// - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public MessageObject( @@ -173,7 +173,7 @@ public MessageObject( global::System.DateTimeOffset? completedAt, global::System.DateTimeOffset? incompleteAt, global::G.MessageObjectRole role, - global::System.Collections.Generic.IList content, + global::System.Collections.Generic.IList content, string? assistantId, string? runId, global::System.Collections.Generic.IList? attachments, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObjectAttachment.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObjectAttachment.g.verified.cs index 25b4adadf4..49f0905d60 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObjectAttachment.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObjectAttachment.g.verified.cs @@ -19,7 +19,7 @@ public sealed partial class MessageObjectAttachment /// The tools to add this file to. /// [global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -39,7 +39,7 @@ public sealed partial class MessageObjectAttachment [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public MessageObjectAttachment( string? fileId, - global::System.Collections.Generic.IList? tools) + global::System.Collections.Generic.IList? tools) { this.FileId = fileId; this.Tools = tools; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObjectMetadata.g.verified.cs index 9dd6d9bd2b..e384a49518 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.MessageObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class MessageObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model12.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model15.Json.g.verified.cs similarity index 90% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model12.Json.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model15.Json.g.verified.cs index 8525db01e7..541df9e222 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model12.Json.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model15.Json.g.verified.cs @@ -1,9 +1,9 @@ -//HintName: G.Models.Model12.Json.g.cs +//HintName: G.Models.Model15.Json.g.cs #nullable enable namespace G { - public sealed partial class Model12 + public sealed partial class Model15 { /// /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. @@ -35,14 +35,14 @@ public string ToJson( /// /// Deserializes a JSON string using the provided JsonSerializerContext. /// - public static global::G.Model12? FromJson( + public static global::G.Model15? FromJson( string json, global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) { return global::System.Text.Json.JsonSerializer.Deserialize( json, - typeof(global::G.Model12), - jsonSerializerContext) as global::G.Model12; + typeof(global::G.Model15), + jsonSerializerContext) as global::G.Model15; } /// @@ -52,11 +52,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::G.Model12? FromJson( + public static global::G.Model15? FromJson( string json, global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) { - return global::System.Text.Json.JsonSerializer.Deserialize( + return global::System.Text.Json.JsonSerializer.Deserialize( json, jsonSerializerOptions); } @@ -64,14 +64,14 @@ public string ToJson( /// /// Deserializes a JSON stream using the provided JsonSerializerContext. /// - public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( global::System.IO.Stream jsonStream, global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) { return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( jsonStream, - typeof(global::G.Model12), - jsonSerializerContext).ConfigureAwait(false)) as global::G.Model12; + typeof(global::G.Model15), + jsonSerializerContext).ConfigureAwait(false)) as global::G.Model15; } /// @@ -81,11 +81,11 @@ public string ToJson( [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] #endif - public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( global::System.IO.Stream jsonStream, global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) { - return global::System.Text.Json.JsonSerializer.DeserializeAsync( + return global::System.Text.Json.JsonSerializer.DeserializeAsync( jsonStream, jsonSerializerOptions); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model12.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model15.g.verified.cs similarity index 92% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model12.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model15.g.verified.cs index 598f891dbe..05f2f4fdac 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model12.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.Model15.g.verified.cs @@ -1,4 +1,4 @@ -//HintName: G.Models.Model12.g.cs +//HintName: G.Models.Model15.g.cs #nullable enable @@ -7,7 +7,7 @@ namespace G /// /// Describes an OpenAI model offering that can be used with the API. /// - public sealed partial class Model12 + public sealed partial class Model15 { /// /// The model identifier, which can be referenced in the API endpoints. @@ -45,7 +45,7 @@ public sealed partial class Model12 public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// /// The model identifier, which can be referenced in the API endpoints. @@ -60,7 +60,7 @@ public sealed partial class Model12 /// The organization that owns the model. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] - public Model12( + public Model15( string id, global::System.DateTimeOffset created, string ownedBy, @@ -73,9 +73,9 @@ public Model12( } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// - public Model12() + public Model15() { } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequest.g.verified.cs index ae561af96f..48b44ddbd3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequest.g.verified.cs @@ -10,7 +10,7 @@ namespace G public sealed partial class ModifyAssistantRequest { /// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. 
+ /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// [global::System.Text.Json.Serialization.JsonPropertyName("model")] public string? Model { get; set; } @@ -34,10 +34,11 @@ public sealed partial class ModifyAssistantRequest public string? Instructions { get; set; } /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] ///
[global::System.Text.Json.Serialization.JsonPropertyName("tools")] - public global::System.Collections.Generic.IList? Tools { get; set; } + public global::System.Collections.Generic.IList? Tools { get; set; } /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. @@ -46,13 +47,13 @@ public sealed partial class ModifyAssistantRequest public global::G.ModifyAssistantRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1
///
@@ -61,7 +62,8 @@ public sealed partial class ModifyAssistantRequest public double? Temperature { get; set; } /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1
///
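// Illustrative usage sketch, not part of the generated snapshot: the doc comments above
// describe `temperature` and `top_p` as alternative sampling controls on
// ModifyAssistantRequest, both defaulting to 1, with the guidance to alter one or the
// other but not both. Assuming the generator also emits the usual parameterless
// constructor for ModifyAssistantRequest (as it does for Model15 and PredictionContent
// elsewhere in this diff), the settable properties could be used like this:
var modifyRequest = new G.ModifyAssistantRequest
{
    Temperature = 0.2, // lower value => more focused, deterministic output
    // TopP is left at its default of 1, per the "alter this or temperature, not both" note above.
};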
@@ -70,9 +72,9 @@ public sealed partial class ModifyAssistantRequest public double? TopP { get; set; } /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
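// Illustrative sketch of the wire shape described above, not part of the generated
// snapshot: the generated ResponseFormat type itself is not shown in this hunk, so this
// only demonstrates the documented JSON payloads ({ "type": "json_object" } for JSON
// mode, { "type": "json_schema", "json_schema": {...} } for Structured Outputs) built
// with System.Text.Json directly.
var jsonModePayload = System.Text.Json.JsonSerializer.Serialize(new { type = "json_object" });
// jsonModePayload == "{\"type\":\"json_object\"}"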
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] @@ -89,7 +91,7 @@ public sealed partial class ModifyAssistantRequest /// Initializes a new instance of the class. ///
/// - /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them. /// /// /// The name of the assistant. The maximum length is 256 characters. @@ -101,28 +103,30 @@ public sealed partial class ModifyAssistantRequest /// The system instructions that the assistant uses. The maximum length is 256,000 characters. /// /// - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+ /// Default Value: [] /// /// /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// - /// empty
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
/// Default Value: 1
/// Example: 1 /// /// - /// empty
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ /// We generally recommend altering this or temperature but not both.
/// Default Value: 1
/// Example: 1 /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] @@ -131,7 +135,7 @@ public ModifyAssistantRequest( string? name, string? description, string? instructions, - global::System.Collections.Generic.IList? tools, + global::System.Collections.Generic.IList? tools, global::G.ModifyAssistantRequestToolResources? toolResources, object? metadata, double? temperature, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequestMetadata.g.verified.cs index a462c93be5..242d885ca3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ModifyAssistantRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequestToolResourcesCodeInterpreter.g.verified.cs index 48c7702c7e..e4036209c7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyAssistantRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class ModifyAssistantRequestToolResourcesCodeInterpreter { /// - /// Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::System.Text.Json.Serialization.JsonPropertyName("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class ModifyAssistantRequestToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ModifyAssistantRequestToolResourcesCodeInterpreter( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyMessageRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyMessageRequest.g.verified.cs index e87ee5baac..3b7a643f7e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyMessageRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyMessageRequest.g.verified.cs @@ -10,7 +10,7 @@ namespace G public sealed partial class ModifyMessageRequest { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -25,7 +25,7 @@ public sealed partial class ModifyMessageRequest /// Initializes a new instance of the class. ///
/// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ModifyMessageRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyMessageRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyMessageRequestMetadata.g.verified.cs index 7f07192464..277457c9d3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyMessageRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyMessageRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ModifyMessageRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyRunRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyRunRequest.g.verified.cs index a9ee458359..16bbc43944 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyRunRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyRunRequest.g.verified.cs @@ -10,7 +10,7 @@ namespace G public sealed partial class ModifyRunRequest { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -25,7 +25,7 @@ public sealed partial class ModifyRunRequest /// Initializes a new instance of the class. ///
/// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ModifyRunRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyRunRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyRunRequestMetadata.g.verified.cs index 91661d22bf..1bfc68a0b7 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyRunRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyRunRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ModifyRunRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequest.g.verified.cs index c634e0c37c..852cb989f9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequest.g.verified.cs @@ -16,7 +16,7 @@ public sealed partial class ModifyThreadRequest public global::G.ModifyThreadRequestToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -34,7 +34,7 @@ public sealed partial class ModifyThreadRequest /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ModifyThreadRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequestMetadata.g.verified.cs index cfdc980a46..4a45654f0b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ModifyThreadRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequestToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequestToolResourcesCodeInterpreter.g.verified.cs index 0ae105fac6..b34da8b308 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequestToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ModifyThreadRequestToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class ModifyThreadRequestToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] ///
[global::System.Text.Json.Serialization.JsonPropertyName("file_ids")] public global::System.Collections.Generic.IList? FileIds { get; set; } @@ -25,7 +26,8 @@ public sealed partial class ModifyThreadRequestToolResourcesCodeInterpreter /// Initializes a new instance of the class. ///
/// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ModifyThreadRequestToolResourcesCodeInterpreter( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContent.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContent.Json.g.verified.cs new file mode 100644 index 0000000000..11f3b702d8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContent.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.PredictionContent.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class PredictionContent + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.PredictionContent? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.PredictionContent), + jsonSerializerContext) as global::G.PredictionContent; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.PredictionContent? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.PredictionContent), + jsonSerializerContext).ConfigureAwait(false)) as global::G.PredictionContent; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContent.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContent.g.verified.cs new file mode 100644 index 0000000000..c78223fcab --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContent.g.verified.cs @@ -0,0 +1,67 @@ +//HintName: G.Models.PredictionContent.g.cs + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace G +{ + /// + /// Static predicted output content, such as the content of a text file that is
+ /// being regenerated. + /// </summary>&#xD;
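A quick sketch of building this model for predicted outputs, assuming the generated OneOf wrapper on Content accepts a plain string (for example via an implicit conversion); the ToJson overload used here is the one generated in the companion .Json file above.

// Illustrative values only.
var prediction = new G.PredictionContent
{
    Type = G.PredictionContentType.Content,                      // always "content" per the schema
    Content = "full text of the file that is being regenerated", // assumed implicit string -> OneOf conversion
};

string json = prediction.ToJson(); // reflection-based overload; see the context-based sketch above for AOT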
+ public sealed partial class PredictionContent + { + /// + /// The type of the predicted content you want to provide. This type is
+ /// currently always `content`. + /// </summary>&#xD;
+ [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.PredictionContentTypeJsonConverter))] + public global::G.PredictionContentType Type { get; set; } + + /// + /// The content that should be matched when generating a model response.
+ /// If generated tokens would match this content, the entire model response
+ /// can be returned much more quickly. + /// </summary>&#xD;
+ [global::System.Text.Json.Serialization.JsonPropertyName("content")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.OneOfJsonConverter>))] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.OneOf> Content { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the predicted content you want to provide. This type is
+ /// currently always `content`. + /// + /// + /// The content that should be matched when generating a model response.
+ /// If generated tokens would match this content, the entire model response
+ /// can be returned much more quickly. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public PredictionContent( + global::G.OneOf> content, + global::G.PredictionContentType type) + { + this.Content = content; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public PredictionContent() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContentType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContentType.g.verified.cs new file mode 100644 index 0000000000..72fa51ecce --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.PredictionContentType.g.verified.cs @@ -0,0 +1,47 @@ +//HintName: G.Models.PredictionContentType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the predicted content you want to provide. This type is
+ /// currently always `content`. + /// </summary>&#xD;
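The extension helpers generated just below convert between the enum and its wire string; a short usage sketch:

// "content" on the wire.
string wire = G.PredictionContentTypeExtensions.ToValueString(G.PredictionContentType.Content);

// Round-trips back to the enum; unknown strings return null instead of throwing.
G.PredictionContentType? parsed = G.PredictionContentTypeExtensions.ToEnum(wire);
G.PredictionContentType? unknown = G.PredictionContentTypeExtensions.ToEnum("something-else"); // null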
+ public enum PredictionContentType + { + /// + /// + /// + Content, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class PredictionContentTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this PredictionContentType value) + { + return value switch + { + PredictionContentType.Content => "content", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static PredictionContentType? ToEnum(string value) + { + return value switch + { + "content" => PredictionContentType.Content, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimit.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimit.Json.g.verified.cs new file mode 100644 index 0000000000..949f02156b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimit.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.ProjectRateLimit.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ProjectRateLimit + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.ProjectRateLimit? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.ProjectRateLimit), + jsonSerializerContext) as global::G.ProjectRateLimit; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ProjectRateLimit? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.ProjectRateLimit), + jsonSerializerContext).ConfigureAwait(false)) as global::G.ProjectRateLimit; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimit.g.verified.cs new file mode 100644 index 0000000000..fe34374593 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimit.g.verified.cs @@ -0,0 +1,137 @@ +//HintName: G.Models.ProjectRateLimit.g.cs + +#nullable enable + +namespace G +{ + /// + /// Represents a project rate limit config. + /// + public sealed partial class ProjectRateLimit + { + /// + /// The object type, which is always `project.rate_limit` + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.ProjectRateLimitObjectJsonConverter))] + public global::G.ProjectRateLimitObject Object { get; set; } + + /// + /// The identifier, which can be referenced in API endpoints. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Id { get; set; } + + /// + /// The model this rate limit applies to. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Model { get; set; } + + /// + /// The maximum requests per minute. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_requests_per_1_minute")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int MaxRequestsPer1Minute { get; set; } + + /// + /// The maximum tokens per minute. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_tokens_per_1_minute")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int MaxTokensPer1Minute { get; set; } + + /// + /// The maximum images per minute. Only present for relevant models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_images_per_1_minute")] + public int? MaxImagesPer1Minute { get; set; } + + /// + /// The maximum audio megabytes per minute. Only present for relevant models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_audio_megabytes_per_1_minute")] + public int? MaxAudioMegabytesPer1Minute { get; set; } + + /// + /// The maximum requests per day. Only present for relevant models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_requests_per_1_day")] + public int? MaxRequestsPer1Day { get; set; } + + /// + /// The maximum batch input tokens per day. Only present for relevant models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("batch_1_day_max_input_tokens")] + public int? Batch1DayMaxInputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The object type, which is always `project.rate_limit` + /// + /// + /// The identifier, which can be referenced in API endpoints. + /// + /// + /// The model this rate limit applies to. + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only present for relevant models. + /// + /// + /// The maximum audio megabytes per minute. Only present for relevant models. + /// + /// + /// The maximum requests per day. Only present for relevant models. + /// + /// + /// The maximum batch input tokens per day. Only present for relevant models. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public ProjectRateLimit( + string id, + string model, + int maxRequestsPer1Minute, + int maxTokensPer1Minute, + global::G.ProjectRateLimitObject @object, + int? maxImagesPer1Minute, + int? maxAudioMegabytesPer1Minute, + int? maxRequestsPer1Day, + int? batch1DayMaxInputTokens) + { + this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); + this.Model = model ?? throw new global::System.ArgumentNullException(nameof(model)); + this.MaxRequestsPer1Minute = maxRequestsPer1Minute; + this.MaxTokensPer1Minute = maxTokensPer1Minute; + this.Object = @object; + this.MaxImagesPer1Minute = maxImagesPer1Minute; + this.MaxAudioMegabytesPer1Minute = maxAudioMegabytesPer1Minute; + this.MaxRequestsPer1Day = maxRequestsPer1Day; + this.Batch1DayMaxInputTokens = batch1DayMaxInputTokens; + } + + /// + /// Initializes a new instance of the class. 
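A sketch of consuming this model from an Admin API response; the JSON payload is illustrative, not a captured response, and uses the snake_case property names declared above.

var payload = """
{
  "object": "project.rate_limit",
  "id": "rl-ada",
  "model": "ada",
  "max_requests_per_1_minute": 600,
  "max_tokens_per_1_minute": 150000
}
""";

G.ProjectRateLimit? limit = G.ProjectRateLimit.FromJson(payload);
if (limit is not null)
{
    // Optional per-model fields (images, audio, daily caps) stay null when absent.
    System.Console.WriteLine($"{limit.Model}: {limit.MaxRequestsPer1Minute} req/min, {limit.MaxTokensPer1Minute} tok/min");
}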
+ /// + public ProjectRateLimit() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponse.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponse.Json.g.verified.cs new file mode 100644 index 0000000000..8beef09a4f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponse.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.ProjectRateLimitListResponse.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ProjectRateLimitListResponse + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.ProjectRateLimitListResponse? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.ProjectRateLimitListResponse), + jsonSerializerContext) as global::G.ProjectRateLimitListResponse; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ProjectRateLimitListResponse? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.ProjectRateLimitListResponse), + jsonSerializerContext).ConfigureAwait(false)) as global::G.ProjectRateLimitListResponse; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponse.g.verified.cs new file mode 100644 index 0000000000..4f518496b3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponse.g.verified.cs @@ -0,0 +1,83 @@ +//HintName: G.Models.ProjectRateLimitListResponse.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class ProjectRateLimitListResponse + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.ProjectRateLimitListResponseObjectJsonConverter))] + public global::G.ProjectRateLimitListResponseObject Object { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("data")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList Data { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("first_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string FirstId { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("last_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string LastId { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("has_more")] + [global::System.Text.Json.Serialization.JsonRequired] + public required bool HasMore { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. 
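A paging sketch for this list response; how the page itself is fetched is outside the generated models shown here, and the `after` cursor convention is an assumption about the corresponding endpoint.

static string? PrintPageAndGetCursor(G.ProjectRateLimitListResponse page)
{
    foreach (G.ProjectRateLimit limit in page.Data)
    {
        System.Console.WriteLine($"{limit.Id}: {limit.MaxRequestsPer1Minute} req/min");
    }

    // When HasMore is true, pass LastId as the next request's cursor (assumed `after` parameter).
    return page.HasMore ? page.LastId : null;
}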
+ /// + /// + /// + /// + /// + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public ProjectRateLimitListResponse( + global::System.Collections.Generic.IList data, + string firstId, + string lastId, + bool hasMore, + global::G.ProjectRateLimitListResponseObject @object) + { + this.Data = data ?? throw new global::System.ArgumentNullException(nameof(data)); + this.FirstId = firstId ?? throw new global::System.ArgumentNullException(nameof(firstId)); + this.LastId = lastId ?? throw new global::System.ArgumentNullException(nameof(lastId)); + this.HasMore = hasMore; + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. + /// + public ProjectRateLimitListResponse() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponseObject.g.verified.cs new file mode 100644 index 0000000000..f9e8bc013c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitListResponseObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.ProjectRateLimitListResponseObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum ProjectRateLimitListResponseObject + { + /// + /// + /// + List, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ProjectRateLimitListResponseObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ProjectRateLimitListResponseObject value) + { + return value switch + { + ProjectRateLimitListResponseObject.List => "list", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ProjectRateLimitListResponseObject? ToEnum(string value) + { + return value switch + { + "list" => ProjectRateLimitListResponseObject.List, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitObject.g.verified.cs new file mode 100644 index 0000000000..9db2a66b64 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.ProjectRateLimitObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// The object type, which is always `project.rate_limit` + /// + public enum ProjectRateLimitObject + { + /// + /// + /// + ProjectRateLimit, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class ProjectRateLimitObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this ProjectRateLimitObject value) + { + return value switch + { + ProjectRateLimitObject.ProjectRateLimit => "project.rate_limit", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static ProjectRateLimitObject? 
ToEnum(string value) + { + return value switch + { + "project.rate_limit" => ProjectRateLimitObject.ProjectRateLimit, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitUpdateRequest.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitUpdateRequest.Json.g.verified.cs new file mode 100644 index 0000000000..bb786581dd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitUpdateRequest.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.ProjectRateLimitUpdateRequest.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class ProjectRateLimitUpdateRequest + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.ProjectRateLimitUpdateRequest? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.ProjectRateLimitUpdateRequest), + jsonSerializerContext) as global::G.ProjectRateLimitUpdateRequest; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ProjectRateLimitUpdateRequest? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
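A sketch of preparing an update body with the request model defined in the next file; only the limits you intend to change are set, and depending on your JsonSerializerOptions the untouched nullable fields may still be written as explicit nulls.

var update = new G.ProjectRateLimitUpdateRequest
{
    MaxRequestsPer1Minute = 100,
    MaxTokensPer1Minute = 20000,
};

string body = update.ToJson(); // request body for the rate-limit update call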
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.ProjectRateLimitUpdateRequest), + jsonSerializerContext).ConfigureAwait(false)) as global::G.ProjectRateLimitUpdateRequest; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitUpdateRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitUpdateRequest.g.verified.cs new file mode 100644 index 0000000000..707f3ff1a0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ProjectRateLimitUpdateRequest.g.verified.cs @@ -0,0 +1,99 @@ +//HintName: G.Models.ProjectRateLimitUpdateRequest.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class ProjectRateLimitUpdateRequest + { + /// + /// The maximum requests per minute. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_requests_per_1_minute")] + public int? MaxRequestsPer1Minute { get; set; } + + /// + /// The maximum tokens per minute. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_tokens_per_1_minute")] + public int? MaxTokensPer1Minute { get; set; } + + /// + /// The maximum images per minute. Only relevant for certain models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_images_per_1_minute")] + public int? MaxImagesPer1Minute { get; set; } + + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_audio_megabytes_per_1_minute")] + public int? MaxAudioMegabytesPer1Minute { get; set; } + + /// + /// The maximum requests per day. Only relevant for certain models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("max_requests_per_1_day")] + public int? MaxRequestsPer1Day { get; set; } + + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("batch_1_day_max_input_tokens")] + public int? 
Batch1DayMaxInputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only relevant for certain models. + /// + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + /// + /// The maximum requests per day. Only relevant for certain models. + /// + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public ProjectRateLimitUpdateRequest( + int? maxRequestsPer1Minute, + int? maxTokensPer1Minute, + int? maxImagesPer1Minute, + int? maxAudioMegabytesPer1Minute, + int? maxRequestsPer1Day, + int? batch1DayMaxInputTokens) + { + this.MaxRequestsPer1Minute = maxRequestsPer1Minute; + this.MaxTokensPer1Minute = maxTokensPer1Minute; + this.MaxImagesPer1Minute = maxImagesPer1Minute; + this.MaxAudioMegabytesPer1Minute = maxAudioMegabytesPer1Minute; + this.MaxRequestsPer1Day = maxRequestsPer1Day; + this.Batch1DayMaxInputTokens = batch1DayMaxInputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public ProjectRateLimitUpdateRequest() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreate.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreate.Json.g.verified.cs new file mode 100644 index 0000000000..8c114d8b66 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreate.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventConversationItemCreate.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventConversationItemCreate + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventConversationItemCreate? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventConversationItemCreate), + jsonSerializerContext) as global::G.RealtimeClientEventConversationItemCreate; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventConversationItemCreate? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventConversationItemCreate), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventConversationItemCreate; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreate.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreate.g.verified.cs new file mode 100644 index 0000000000..dabcf140a2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreate.g.verified.cs @@ -0,0 +1,90 @@ +//HintName: G.Models.RealtimeClientEventConversationItemCreate.g.cs + +#nullable enable + +namespace G +{ + /// + /// Add a new Item to the Conversation's context, including messages, function
+ /// calls, and function call responses. This event can be used both to populate a
+ /// "history" of the conversation and to add new items mid-stream, but has the
+ /// current limitation that it cannot populate assistant audio messages.
+ /// If successful, the server will respond with a `conversation.item.created`
+ /// event, otherwise an `error` event will be sent. + /// </summary>&#xD;
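A sketch of emitting this client event, assuming a RealtimeConversationItem has already been built elsewhere (its shape is defined in a separate generated file) and that the resulting JSON is sent over the Realtime WebSocket by the caller.

static string BuildCreateEvent(G.RealtimeConversationItem item)
{
    var evt = new G.RealtimeClientEventConversationItemCreate
    {
        Item = item,
        EventId = "evt_client_001", // optional, client-chosen correlation id
        // PreviousItemId left null: the item is appended to the end of the conversation.
    };

    return evt.ToJson();
}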
+ public sealed partial class RealtimeClientEventConversationItemCreate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `conversation.item.create`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventConversationItemCreateTypeJsonConverter))] + public global::G.RealtimeClientEventConversationItemCreateType Type { get; set; } + + /// + /// The ID of the preceding item after which the new item will be inserted.
+ /// If not set, the new item will be appended to the end of the conversation.
+ /// If set, it allows an item to be inserted mid-conversation. If the ID
+ /// cannot be found, an error will be returned and the item will not be added. + /// </summary>&#xD;
+ [global::System.Text.Json.Serialization.JsonPropertyName("previous_item_id")] + public string? PreviousItemId { get; set; } + + /// + /// The item to add to the conversation. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeConversationItem Item { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `conversation.item.create`. + /// + /// + /// The ID of the preceding item after which the new item will be inserted.
+ /// If not set, the new item will be appended to the end of the conversation.
+ /// If set, it allows an item to be inserted mid-conversation. If the ID
+ /// cannot be found, an error will be returned and the item will not be added. + /// + /// + /// The item to add to the conversation. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventConversationItemCreate( + global::G.RealtimeConversationItem item, + string? eventId, + global::G.RealtimeClientEventConversationItemCreateType type, + string? previousItemId) + { + this.Item = item ?? throw new global::System.ArgumentNullException(nameof(item)); + this.EventId = eventId; + this.Type = type; + this.PreviousItemId = previousItemId; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventConversationItemCreate() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreateType.g.verified.cs new file mode 100644 index 0000000000..a3781a9c51 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemCreateType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventConversationItemCreateType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.create`. + /// + public enum RealtimeClientEventConversationItemCreateType + { + /// + /// + /// + ConversationItemCreate, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventConversationItemCreateTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventConversationItemCreateType value) + { + return value switch + { + RealtimeClientEventConversationItemCreateType.ConversationItemCreate => "conversation.item.create", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventConversationItemCreateType? ToEnum(string value) + { + return value switch + { + "conversation.item.create" => RealtimeClientEventConversationItemCreateType.ConversationItemCreate, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDelete.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDelete.Json.g.verified.cs new file mode 100644 index 0000000000..74a6e6f1d1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDelete.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventConversationItemDelete.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventConversationItemDelete + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventConversationItemDelete? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventConversationItemDelete), + jsonSerializerContext) as global::G.RealtimeClientEventConversationItemDelete; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventConversationItemDelete? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventConversationItemDelete), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventConversationItemDelete; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDelete.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDelete.g.verified.cs new file mode 100644 index 0000000000..e776570080 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDelete.g.verified.cs @@ -0,0 +1,71 @@ +//HintName: G.Models.RealtimeClientEventConversationItemDelete.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event when you want to remove any item from the conversation
+ /// history. The server will respond with a `conversation.item.deleted` event,
+ /// unless the item does not exist in the conversation history, in which case the
+ /// server will respond with an error. + /// </summary>&#xD;
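A sketch of building this event; the item id is illustrative, and sending the resulting JSON frame over the Realtime connection is left to the caller.

var deleteEvent = new G.RealtimeClientEventConversationItemDelete
{
    ItemId = "msg_003",         // the conversation item to remove
    EventId = "evt_client_002", // optional, client-chosen correlation id
};

string frame = deleteEvent.ToJson();
// Expect a conversation.item.deleted server event, or an error event if the id is unknown.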
+ public sealed partial class RealtimeClientEventConversationItemDelete + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `conversation.item.delete`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventConversationItemDeleteTypeJsonConverter))] + public global::G.RealtimeClientEventConversationItemDeleteType Type { get; set; } + + /// + /// The ID of the item to delete. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `conversation.item.delete`. + /// + /// + /// The ID of the item to delete. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventConversationItemDelete( + string itemId, + string? eventId, + global::G.RealtimeClientEventConversationItemDeleteType type) + { + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventConversationItemDelete() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDeleteType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDeleteType.g.verified.cs new file mode 100644 index 0000000000..0d2c018b44 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemDeleteType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventConversationItemDeleteType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.delete`. + /// + public enum RealtimeClientEventConversationItemDeleteType + { + /// + /// + /// + ConversationItemDelete, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventConversationItemDeleteTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventConversationItemDeleteType value) + { + return value switch + { + RealtimeClientEventConversationItemDeleteType.ConversationItemDelete => "conversation.item.delete", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventConversationItemDeleteType? 
ToEnum(string value) + { + return value switch + { + "conversation.item.delete" => RealtimeClientEventConversationItemDeleteType.ConversationItemDelete, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncate.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncate.Json.g.verified.cs new file mode 100644 index 0000000000..ad905b6800 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncate.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventConversationItemTruncate.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventConversationItemTruncate + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventConversationItemTruncate? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventConversationItemTruncate), + jsonSerializerContext) as global::G.RealtimeClientEventConversationItemTruncate; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventConversationItemTruncate? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventConversationItemTruncate), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventConversationItemTruncate; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncate.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncate.g.verified.cs new file mode 100644 index 0000000000..b26b74cd96 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncate.g.verified.cs @@ -0,0 +1,106 @@ +//HintName: G.Models.RealtimeClientEventConversationItemTruncate.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to truncate a previous assistant message’s audio. The server
+ /// will produce audio faster than realtime, so this event is useful when the user
+ /// interrupts to truncate audio that has already been sent to the client but not
+ /// yet played. This will synchronize the server's understanding of the audio with
+ /// the client's playback.
+ /// Truncating audio will delete the server-side text transcript to ensure there
+ /// is no text in the context that hasn't been heard by the user.<br/>
+ /// If successful, the server will respond with a `conversation.item.truncated`
+ /// event. + ///
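// --- Illustrative usage sketch; not part of the generated snapshot. ---
// How the model generated below might be used to build a `conversation.item.truncate`
// payload. The item id and duration are hypothetical example values; ToJson() is the
// generated JsonSerializerOptions overload called with its default (null) argument.
var truncate = new global::G.RealtimeClientEventConversationItemTruncate
{
    Type = global::G.RealtimeClientEventConversationItemTruncateType.ConversationItemTruncate,
    ItemId = "item_abc123",   // hypothetical assistant message item id
    ContentIndex = 0,         // the schema expects 0 here
    AudioEndMs = 1500,        // keep only the first 1.5 seconds of audio
};
string truncatePayload = truncate.ToJson(); // JSON body to send over the realtime connection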
+ public sealed partial class RealtimeClientEventConversationItemTruncate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `conversation.item.truncate`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventConversationItemTruncateTypeJsonConverter))] + public global::G.RealtimeClientEventConversationItemTruncateType Type { get; set; } + + /// + /// The ID of the assistant message item to truncate. Only assistant message
+ /// items can be truncated. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the content part to truncate. Set this to 0. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// Inclusive duration up to which audio is truncated, in milliseconds. If
+ /// the audio_end_ms is greater than the actual audio duration, the server
+ /// will respond with an error. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio_end_ms")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int AudioEndMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `conversation.item.truncate`. + /// + /// + /// The ID of the assistant message item to truncate. Only assistant message
+ /// items can be truncated. + /// + /// + /// The index of the content part to truncate. Set this to 0. + /// + /// + /// Inclusive duration up to which audio is truncated, in milliseconds. If
+ /// the audio_end_ms is greater than the actual audio duration, the server
+ /// will respond with an error. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventConversationItemTruncate( + string itemId, + int contentIndex, + int audioEndMs, + string? eventId, + global::G.RealtimeClientEventConversationItemTruncateType type) + { + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.ContentIndex = contentIndex; + this.AudioEndMs = audioEndMs; + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventConversationItemTruncate() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncateType.g.verified.cs new file mode 100644 index 0000000000..0e8c839d75 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventConversationItemTruncateType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventConversationItemTruncateType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.truncate`. + /// + public enum RealtimeClientEventConversationItemTruncateType + { + /// + /// + /// + ConversationItemTruncate, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventConversationItemTruncateTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventConversationItemTruncateType value) + { + return value switch + { + RealtimeClientEventConversationItemTruncateType.ConversationItemTruncate => "conversation.item.truncate", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventConversationItemTruncateType? ToEnum(string value) + { + return value switch + { + "conversation.item.truncate" => RealtimeClientEventConversationItemTruncateType.ConversationItemTruncate, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.Json.g.verified.cs new file mode 100644 index 0000000000..58bba7000b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferAppend.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventInputAudioBufferAppend + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventInputAudioBufferAppend? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventInputAudioBufferAppend), + jsonSerializerContext) as global::G.RealtimeClientEventInputAudioBufferAppend; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventInputAudioBufferAppend? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventInputAudioBufferAppend), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventInputAudioBufferAppend; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.g.verified.cs new file mode 100644 index 0000000000..196c538330 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppend.g.verified.cs @@ -0,0 +1,78 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferAppend.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to append audio bytes to the input audio buffer. The audio
+ /// buffer is temporary storage you can write to and later commit. In Server VAD
+ /// mode, the audio buffer is used to detect speech and the server will decide
+ /// when to commit. When Server VAD is disabled, you must commit the audio buffer
+ /// manually.
+ /// The client may choose how much audio to place in each event up to a maximum
+ /// of 15 MiB; for example, streaming smaller chunks from the client may allow the<br/>
+ /// VAD to be more responsive. Unlike most other client events, the server will<br/>
+ /// not send a confirmation response to this event. + ///
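// --- Illustrative usage sketch; not part of the generated snapshot. ---
// Builds an `input_audio_buffer.append` event. The audio must be base64-encoded in the
// session's `input_audio_format`; the byte array below is a hypothetical placeholder for
// one small chunk of PCM16 samples.
byte[] pcm16Chunk = new byte[3200]; // e.g. ~100 ms of 16 kHz mono PCM16 (hypothetical)
var append = new global::G.RealtimeClientEventInputAudioBufferAppend
{
    Type = global::G.RealtimeClientEventInputAudioBufferAppendType.InputAudioBufferAppend,
    Audio = global::System.Convert.ToBase64String(pcm16Chunk),
};
string appendPayload = append.ToJson(); // no confirmation event is expected back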
+ public sealed partial class RealtimeClientEventInputAudioBufferAppend + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.append`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferAppendTypeJsonConverter))] + public global::G.RealtimeClientEventInputAudioBufferAppendType Type { get; set; } + + /// + /// Base64-encoded audio bytes. This must be in the format specified by the
+ /// `input_audio_format` field in the session configuration. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Audio { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `input_audio_buffer.append`. + /// + /// + /// Base64-encoded audio bytes. This must be in the format specified by the
+ /// `input_audio_format` field in the session configuration. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventInputAudioBufferAppend( + string audio, + string? eventId, + global::G.RealtimeClientEventInputAudioBufferAppendType type) + { + this.Audio = audio ?? throw new global::System.ArgumentNullException(nameof(audio)); + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventInputAudioBufferAppend() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs new file mode 100644 index 0000000000..3631f9a37c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferAppendType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.append`. + /// + public enum RealtimeClientEventInputAudioBufferAppendType + { + /// + /// + /// + InputAudioBufferAppend, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventInputAudioBufferAppendTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventInputAudioBufferAppendType value) + { + return value switch + { + RealtimeClientEventInputAudioBufferAppendType.InputAudioBufferAppend => "input_audio_buffer.append", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventInputAudioBufferAppendType? ToEnum(string value) + { + return value switch + { + "input_audio_buffer.append" => RealtimeClientEventInputAudioBufferAppendType.InputAudioBufferAppend, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.Json.g.verified.cs new file mode 100644 index 0000000000..a33f9b9ee1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferClear.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventInputAudioBufferClear + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventInputAudioBufferClear? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventInputAudioBufferClear), + jsonSerializerContext) as global::G.RealtimeClientEventInputAudioBufferClear; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventInputAudioBufferClear? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventInputAudioBufferClear), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventInputAudioBufferClear; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.g.verified.cs new file mode 100644 index 0000000000..42e7189c4f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClear.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferClear.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to clear the audio bytes in the buffer. The server will
+ /// respond with an `input_audio_buffer.cleared` event. + ///
+ public sealed partial class RealtimeClientEventInputAudioBufferClear + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.clear`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferClearTypeJsonConverter))] + public global::G.RealtimeClientEventInputAudioBufferClearType Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `input_audio_buffer.clear`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventInputAudioBufferClear( + string? eventId, + global::G.RealtimeClientEventInputAudioBufferClearType type) + { + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventInputAudioBufferClear() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClearType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClearType.g.verified.cs new file mode 100644 index 0000000000..4644455e59 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferClearType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferClearType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.clear`. + /// + public enum RealtimeClientEventInputAudioBufferClearType + { + /// + /// + /// + InputAudioBufferClear, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventInputAudioBufferClearTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventInputAudioBufferClearType value) + { + return value switch + { + RealtimeClientEventInputAudioBufferClearType.InputAudioBufferClear => "input_audio_buffer.clear", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventInputAudioBufferClearType? 
ToEnum(string value) + { + return value switch + { + "input_audio_buffer.clear" => RealtimeClientEventInputAudioBufferClearType.InputAudioBufferClear, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.Json.g.verified.cs new file mode 100644 index 0000000000..6ade497d6f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferCommit.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventInputAudioBufferCommit + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventInputAudioBufferCommit? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventInputAudioBufferCommit), + jsonSerializerContext) as global::G.RealtimeClientEventInputAudioBufferCommit; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventInputAudioBufferCommit? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventInputAudioBufferCommit), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventInputAudioBufferCommit; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.g.verified.cs new file mode 100644 index 0000000000..1fa21fcd0b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommit.g.verified.cs @@ -0,0 +1,64 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferCommit.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to commit the user input audio buffer, which will create a
+ /// new user message item in the conversation. This event will produce an error
+ /// if the input audio buffer is empty. When in Server VAD mode, the client does
+ /// not need to send this event; the server will commit the audio buffer<br/>
+ /// automatically.
+ /// Committing the input audio buffer will trigger input audio transcription
+ /// (if enabled in session configuration), but it will not create a response
+ /// from the model. The server will respond with an `input_audio_buffer.committed`
+ /// event. + ///
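// --- Illustrative usage sketch; not part of the generated snapshot. ---
// A manual commit (used when Server VAD is disabled) carries only the event type and an
// optional client-generated id, here a hypothetical value, using the generated
// convenience constructor shown below.
var commit = new global::G.RealtimeClientEventInputAudioBufferCommit(
    eventId: "evt_client_001", // hypothetical client-generated event id
    type: global::G.RealtimeClientEventInputAudioBufferCommitType.InputAudioBufferCommit);
string commitPayload = commit.ToJson(); // server should answer with `input_audio_buffer.committed`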
+ public sealed partial class RealtimeClientEventInputAudioBufferCommit + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.commit`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferCommitTypeJsonConverter))] + public global::G.RealtimeClientEventInputAudioBufferCommitType Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `input_audio_buffer.commit`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventInputAudioBufferCommit( + string? eventId, + global::G.RealtimeClientEventInputAudioBufferCommitType type) + { + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventInputAudioBufferCommit() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs new file mode 100644 index 0000000000..965df76d04 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventInputAudioBufferCommitType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.commit`. + /// + public enum RealtimeClientEventInputAudioBufferCommitType + { + /// + /// + /// + InputAudioBufferCommit, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventInputAudioBufferCommitTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventInputAudioBufferCommitType value) + { + return value switch + { + RealtimeClientEventInputAudioBufferCommitType.InputAudioBufferCommit => "input_audio_buffer.commit", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventInputAudioBufferCommitType? 
ToEnum(string value) + { + return value switch + { + "input_audio_buffer.commit" => RealtimeClientEventInputAudioBufferCommitType.InputAudioBufferCommit, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancel.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancel.Json.g.verified.cs new file mode 100644 index 0000000000..c93c473cfe --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancel.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventResponseCancel.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventResponseCancel + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventResponseCancel? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventResponseCancel), + jsonSerializerContext) as global::G.RealtimeClientEventResponseCancel; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventResponseCancel? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventResponseCancel), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventResponseCancel; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancel.g.verified.cs new file mode 100644 index 0000000000..b7017454ac --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancel.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.RealtimeClientEventResponseCancel.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to cancel an in-progress response. The server will respond
+ /// with a `response.cancelled` event or an error if there is no response to
+ /// cancel. + ///
+ public sealed partial class RealtimeClientEventResponseCancel + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `response.cancel`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventResponseCancelTypeJsonConverter))] + public global::G.RealtimeClientEventResponseCancelType Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `response.cancel`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventResponseCancel( + string? eventId, + global::G.RealtimeClientEventResponseCancelType type) + { + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventResponseCancel() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancelType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancelType.g.verified.cs new file mode 100644 index 0000000000..82b57c4ee0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCancelType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventResponseCancelType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.cancel`. + /// + public enum RealtimeClientEventResponseCancelType + { + /// + /// + /// + ResponseCancel, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventResponseCancelTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventResponseCancelType value) + { + return value switch + { + RealtimeClientEventResponseCancelType.ResponseCancel => "response.cancel", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventResponseCancelType? 
ToEnum(string value) + { + return value switch + { + "response.cancel" => RealtimeClientEventResponseCancelType.ResponseCancel, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreate.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreate.Json.g.verified.cs new file mode 100644 index 0000000000..9b05be5f7f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreate.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventResponseCreate.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventResponseCreate + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventResponseCreate? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventResponseCreate), + jsonSerializerContext) as global::G.RealtimeClientEventResponseCreate; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventResponseCreate? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventResponseCreate), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventResponseCreate; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreate.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreate.g.verified.cs new file mode 100644 index 0000000000..d0adfd3ac3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreate.g.verified.cs @@ -0,0 +1,79 @@ +//HintName: G.Models.RealtimeClientEventResponseCreate.g.cs + +#nullable enable + +namespace G +{ + /// + /// This event instructs the server to create a Response, which means triggering
+ /// model inference. When in Server VAD mode, the server will create Responses
+ /// automatically.
+ /// A Response will include at least one Item, and may have two, in which case
+ /// the second will be a function call. These Items will be appended to the
+ /// conversation history.
+ /// The server will respond with a `response.created` event, events for Items
+ /// and content created, and finally a `response.done` event to indicate the
+ /// Response is complete.
+ /// The `response.create` event includes inference configuration like
+ /// `instructions` and `temperature`. These fields will override the Session's<br/>
+ /// configuration for this Response only. + ///
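// --- Illustrative usage sketch; not part of the generated snapshot. ---
// Triggers model inference. Per-response overrides (instructions, temperature, ...) live
// on the generated RealtimeSession model, which is defined elsewhere in this SDK; a
// parameterless construction is assumed here, matching the pattern of the other
// generated models.
var inferenceOverrides = new global::G.RealtimeSession(); // assumed parameterless ctor
var responseCreate = new global::G.RealtimeClientEventResponseCreate
{
    Type = global::G.RealtimeClientEventResponseCreateType.ResponseCreate,
    Response = inferenceOverrides,
};
string responseCreatePayload = responseCreate.ToJson();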
+ public sealed partial class RealtimeClientEventResponseCreate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `response.create`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventResponseCreateTypeJsonConverter))] + public global::G.RealtimeClientEventResponseCreateType Type { get; set; } + + /// + /// Realtime session object configuration. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeSession Response { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `response.create`. + /// + /// + /// Realtime session object configuration. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventResponseCreate( + global::G.RealtimeSession response, + string? eventId, + global::G.RealtimeClientEventResponseCreateType type) + { + this.Response = response ?? throw new global::System.ArgumentNullException(nameof(response)); + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventResponseCreate() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreateType.g.verified.cs new file mode 100644 index 0000000000..d64b3ebfc3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventResponseCreateType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventResponseCreateType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.create`. + /// + public enum RealtimeClientEventResponseCreateType + { + /// + /// + /// + ResponseCreate, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventResponseCreateTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventResponseCreateType value) + { + return value switch + { + RealtimeClientEventResponseCreateType.ResponseCreate => "response.create", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventResponseCreateType? 
ToEnum(string value) + { + return value switch + { + "response.create" => RealtimeClientEventResponseCreateType.ResponseCreate, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdate.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdate.Json.g.verified.cs new file mode 100644 index 0000000000..a999d26744 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdate.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeClientEventSessionUpdate.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeClientEventSessionUpdate + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeClientEventSessionUpdate? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeClientEventSessionUpdate), + jsonSerializerContext) as global::G.RealtimeClientEventSessionUpdate; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeClientEventSessionUpdate? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeClientEventSessionUpdate), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeClientEventSessionUpdate; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdate.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdate.g.verified.cs new file mode 100644 index 0000000000..b358e45c45 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdate.g.verified.cs @@ -0,0 +1,73 @@ +//HintName: G.Models.RealtimeClientEventSessionUpdate.g.cs + +#nullable enable + +namespace G +{ + /// + /// Send this event to update the session’s default configuration. The client may
+ /// send this event at any time to update the session configuration, and any
+ /// field may be updated at any time, except for "voice". The server will respond
+ /// with a `session.updated` event that shows the full effective configuration.
+ /// Only fields that are present are updated, thus the correct way to clear a
+ /// field like "instructions" is to pass an empty string. + ///
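+ // Editor's note — a minimal usage sketch, not part of the generated snapshot: build a
+ // session.update event with this model and serialize it with the reflection-based ToJson()
+ // overload before sending it over the realtime connection. The RealtimeSession initializer
+ // below assumes that generated model exposes an Instructions setter; adjust to its members.
+ //   var update = new RealtimeClientEventSessionUpdate
+ //   {
+ //       EventId = "event_123", // hypothetical client-generated ID
+ //       Type = RealtimeClientEventSessionUpdateType.SessionUpdate,
+ //       Session = new RealtimeSession { Instructions = "You are a helpful assistant." },
+ //   };
+ //   string json = update.ToJson();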
+ public sealed partial class RealtimeClientEventSessionUpdate + { + /// + /// Optional client-generated ID used to identify this event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// The event type, must be `session.update`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeClientEventSessionUpdateTypeJsonConverter))] + public global::G.RealtimeClientEventSessionUpdateType Type { get; set; } + + /// + /// Realtime session object configuration. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("session")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeSession Session { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Optional client-generated ID used to identify this event. + /// + /// + /// The event type, must be `session.update`. + /// + /// + /// Realtime session object configuration. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeClientEventSessionUpdate( + global::G.RealtimeSession session, + string? eventId, + global::G.RealtimeClientEventSessionUpdateType type) + { + this.Session = session ?? throw new global::System.ArgumentNullException(nameof(session)); + this.EventId = eventId; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeClientEventSessionUpdate() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdateType.g.verified.cs new file mode 100644 index 0000000000..c6685411aa --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeClientEventSessionUpdateType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeClientEventSessionUpdateType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `session.update`. + /// + public enum RealtimeClientEventSessionUpdateType + { + /// + /// + /// + SessionUpdate, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeClientEventSessionUpdateTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeClientEventSessionUpdateType value) + { + return value switch + { + RealtimeClientEventSessionUpdateType.SessionUpdate => "session.update", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeClientEventSessionUpdateType? 
ToEnum(string value) + { + return value switch + { + "session.update" => RealtimeClientEventSessionUpdateType.SessionUpdate, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItem.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItem.Json.g.verified.cs new file mode 100644 index 0000000000..3a0d04dd9a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItem.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeConversationItem.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeConversationItem + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeConversationItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeConversationItem), + jsonSerializerContext) as global::G.RealtimeConversationItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeConversationItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeConversationItem), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeConversationItem; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItem.g.verified.cs new file mode 100644 index 0000000000..a23b47f175 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItem.g.verified.cs @@ -0,0 +1,171 @@ +//HintName: G.Models.RealtimeConversationItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// The item to add to the conversation. + /// + public sealed partial class RealtimeConversationItem + { + /// + /// The unique ID of the item, this can be generated by the client to help
+ /// manage server-side context, but is not required because the server will
+ /// generate one if not provided. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The type of the item (`message`, `function_call`, `function_call_output`). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeConversationItemTypeJsonConverter))] + public global::G.RealtimeConversationItemType? Type { get; set; } + + /// + /// Identifier for the API object being returned - always `realtime.item`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeConversationItemObjectJsonConverter))] + public global::G.RealtimeConversationItemObject? Object { get; set; } + + /// + /// The status of the item (`completed`, `incomplete`). These have no effect
+ /// on the conversation, but are accepted for consistency with the
+ /// `conversation.item.created` event. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("status")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeConversationItemStatusJsonConverter))] + public global::G.RealtimeConversationItemStatus? Status { get; set; } + + /// + /// The role of the message sender (`user`, `assistant`, `system`), only
+ /// applicable for `message` items. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("role")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeConversationItemRoleJsonConverter))] + public global::G.RealtimeConversationItemRole? Role { get; set; } + + /// + /// The content of the message, applicable for `message` items.
+ /// - Message items of role `system` support only `input_text` content
+ /// - Message items of role `user` support `input_text` and `input_audio`
+ /// content
+ /// - Message items of role `assistant` support `text` content. + ///
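+ // Editor's note — a usage sketch, not part of the generated snapshot: a `user` message item
+ // is limited to `input_text`/`input_audio` content, so a text-only user item can be built as
+ // follows (assuming the list element type is the generated RealtimeConversationItemContentItem):
+ //   var userMessage = new RealtimeConversationItem
+ //   {
+ //       Type = RealtimeConversationItemType.Message,
+ //       Role = RealtimeConversationItemRole.User,
+ //       Content = new List<RealtimeConversationItemContentItem>
+ //       {
+ //           new() { Type = RealtimeConversationItemContentItemType.InputText, Text = "Hello!" },
+ //       },
+ //   };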
+ [global::System.Text.Json.Serialization.JsonPropertyName("content")] + public global::System.Collections.Generic.IList? Content { get; set; } + + /// + /// The ID of the function call (for `function_call` and
+ /// `function_call_output` items). If passed on a `function_call_output`
+ /// item, the server will check that a `function_call` item with the same
+ /// ID exists in the conversation history. + ///
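+ // Editor's note — a usage sketch, not part of the generated snapshot: returning a tool result
+ // pairs a `function_call_output` item with the `call_id` of the earlier `function_call` item:
+ //   var toolResult = new RealtimeConversationItem
+ //   {
+ //       Type = RealtimeConversationItemType.FunctionCallOutput,
+ //       CallId = "call_abc123", // hypothetical ID taken from the prior function_call item
+ //       Output = "{\"temperature\":22}",
+ //   };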
+ [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] + public string? CallId { get; set; } + + /// + /// The name of the function being called (for `function_call` items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The arguments of the function call (for `function_call` items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("arguments")] + public string? Arguments { get; set; } + + /// + /// The output of the function call (for `function_call_output` items). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output")] + public string? Output { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the item, this can be generated by the client to help
+ /// manage server-side context, but is not required because the server will
+ /// generate one if not provided. + /// + /// + /// The type of the item (`message`, `function_call`, `function_call_output`). + /// + /// + /// Identifier for the API object being returned - always `realtime.item`. + /// + /// + /// The status of the item (`completed`, `incomplete`). These have no effect
+ /// on the conversation, but are accepted for consistency with the
+ /// `conversation.item.created` event. + /// + /// + /// The role of the message sender (`user`, `assistant`, `system`), only
+ /// applicable for `message` items. + /// + /// + /// The content of the message, applicable for `message` items.
+ /// - Message items of role `system` support only `input_text` content
+ /// - Message items of role `user` support `input_text` and `input_audio`
+ /// content
+ /// - Message items of role `assistant` support `text` content. + /// + /// + /// The ID of the function call (for `function_call` and
+ /// `function_call_output` items). If passed on a `function_call_output`
+ /// item, the server will check that a `function_call` item with the same
+ /// ID exists in the conversation history. + /// + /// + /// The name of the function being called (for `function_call` items). + /// + /// + /// The arguments of the function call (for `function_call` items). + /// + /// + /// The output of the function call (for `function_call_output` items). + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeConversationItem( + string? id, + global::G.RealtimeConversationItemType? type, + global::G.RealtimeConversationItemObject? @object, + global::G.RealtimeConversationItemStatus? status, + global::G.RealtimeConversationItemRole? role, + global::System.Collections.Generic.IList? content, + string? callId, + string? name, + string? arguments, + string? output) + { + this.Id = id; + this.Type = type; + this.Object = @object; + this.Status = status; + this.Role = role; + this.Content = content; + this.CallId = callId; + this.Name = name; + this.Arguments = arguments; + this.Output = output; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeConversationItem() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItem.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItem.Json.g.verified.cs new file mode 100644 index 0000000000..4551357106 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItem.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeConversationItemContentItem.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeConversationItemContentItem + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeConversationItemContentItem? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeConversationItemContentItem), + jsonSerializerContext) as global::G.RealtimeConversationItemContentItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeConversationItemContentItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeConversationItemContentItem), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeConversationItemContentItem; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItem.g.verified.cs new file mode 100644 index 0000000000..0aa8fe56bb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItem.g.verified.cs @@ -0,0 +1,78 @@ +//HintName: G.Models.RealtimeConversationItemContentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class RealtimeConversationItemContentItem + { + /// + /// The content type (`input_text`, `input_audio`, `text`). 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeConversationItemContentItemTypeJsonConverter))] + public global::G.RealtimeConversationItemContentItemType? Type { get; set; } + + /// + /// The text content, used for `input_text` and `text` content types. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio bytes, used for `input_audio` content type. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio, used for `input_audio` content type. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The content type (`input_text`, `input_audio`, `text`). + /// + /// + /// The text content, used for `input_text` and `text` content types. + /// + /// + /// Base64-encoded audio bytes, used for `input_audio` content type. + /// + /// + /// The transcript of the audio, used for `input_audio` content type. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeConversationItemContentItem( + global::G.RealtimeConversationItemContentItemType? type, + string? text, + string? audio, + string? transcript) + { + this.Type = type; + this.Text = text; + this.Audio = audio; + this.Transcript = transcript; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeConversationItemContentItem() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItemType.g.verified.cs new file mode 100644 index 0000000000..4db7a65368 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemContentItemType.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.RealtimeConversationItemContentItemType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The content type (`input_text`, `input_audio`, `text`). + /// + public enum RealtimeConversationItemContentItemType + { + /// + /// + /// + InputAudio, + /// + /// + /// + InputText, + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemContentItemTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemContentItemType value) + { + return value switch + { + RealtimeConversationItemContentItemType.InputAudio => "input_audio", + RealtimeConversationItemContentItemType.InputText => "input_text", + RealtimeConversationItemContentItemType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. 
+ /// + public static RealtimeConversationItemContentItemType? ToEnum(string value) + { + return value switch + { + "input_audio" => RealtimeConversationItemContentItemType.InputAudio, + "input_text" => RealtimeConversationItemContentItemType.InputText, + "text" => RealtimeConversationItemContentItemType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemObject.g.verified.cs new file mode 100644 index 0000000000..12b1fff9fd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeConversationItemObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// Identifier for the API object being returned - always `realtime.item`. + /// + public enum RealtimeConversationItemObject + { + /// + /// + /// + RealtimeItem, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemObject value) + { + return value switch + { + RealtimeConversationItemObject.RealtimeItem => "realtime.item", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemObject? ToEnum(string value) + { + return value switch + { + "realtime.item" => RealtimeConversationItemObject.RealtimeItem, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemRole.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemRole.g.verified.cs new file mode 100644 index 0000000000..fc1898f797 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemRole.g.verified.cs @@ -0,0 +1,59 @@ +//HintName: G.Models.RealtimeConversationItemRole.g.cs + +#nullable enable + +namespace G +{ + /// + /// The role of the message sender (`user`, `assistant`, `system`), only
+ /// applicable for `message` items. + ///
+ public enum RealtimeConversationItemRole + { + /// + /// + /// + User, + /// + /// + /// + Assistant, + /// + /// + /// + Systems, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemRoleExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemRole value) + { + return value switch + { + RealtimeConversationItemRole.User => "user", + RealtimeConversationItemRole.Assistant => "assistant", + RealtimeConversationItemRole.Systems => "systems", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemRole? ToEnum(string value) + { + return value switch + { + "user" => RealtimeConversationItemRole.User, + "assistant" => RealtimeConversationItemRole.Assistant, + "systems" => RealtimeConversationItemRole.Systems, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemStatus.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemStatus.g.verified.cs new file mode 100644 index 0000000000..4d921335ee --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemStatus.g.verified.cs @@ -0,0 +1,54 @@ +//HintName: G.Models.RealtimeConversationItemStatus.g.cs + +#nullable enable + +namespace G +{ + /// + /// The status of the item (`completed`, `incomplete`). These have no effect
+ /// on the conversation, but are accepted for consistency with the
+ /// `conversation.item.created` event. + ///
+ public enum RealtimeConversationItemStatus + { + /// + /// + /// + Completed, + /// + /// + /// + Incomplete, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemStatusExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemStatus value) + { + return value switch + { + RealtimeConversationItemStatus.Completed => "completed", + RealtimeConversationItemStatus.Incomplete => "incomplete", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemStatus? ToEnum(string value) + { + return value switch + { + "completed" => RealtimeConversationItemStatus.Completed, + "incomplete" => RealtimeConversationItemStatus.Incomplete, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemType.g.verified.cs new file mode 100644 index 0000000000..5bafc937f7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeConversationItemType.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.RealtimeConversationItemType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the item (`message`, `function_call`, `function_call_output`). + /// + public enum RealtimeConversationItemType + { + /// + /// + /// + Message, + /// + /// + /// + FunctionCall, + /// + /// + /// + FunctionCallOutput, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeConversationItemTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeConversationItemType value) + { + return value switch + { + RealtimeConversationItemType.Message => "message", + RealtimeConversationItemType.FunctionCall => "function_call", + RealtimeConversationItemType.FunctionCallOutput => "function_call_output", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeConversationItemType? ToEnum(string value) + { + return value switch + { + "message" => RealtimeConversationItemType.Message, + "function_call" => RealtimeConversationItemType.FunctionCall, + "function_call_output" => RealtimeConversationItemType.FunctionCallOutput, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponse.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponse.Json.g.verified.cs new file mode 100644 index 0000000000..3a66ed23f2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponse.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeResponse.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponse + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeResponse? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeResponse), + jsonSerializerContext) as global::G.RealtimeResponse; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponse? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeResponse), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeResponse; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponse.g.verified.cs new file mode 100644 index 0000000000..deab9920a7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponse.g.verified.cs @@ -0,0 +1,109 @@ +//HintName: G.Models.RealtimeResponse.g.cs + +#nullable enable + +namespace G +{ + /// + /// The response resource. + /// + public sealed partial class RealtimeResponse + { + /// + /// The unique ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be `realtime.response`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeResponseObjectJsonConverter))] + public global::G.RealtimeResponseObject? Object { get; set; } + + /// + /// The final status of the response (`completed`, `cancelled`, `failed`, or
+ /// `incomplete`). + ///
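+ // Editor's note — a usage sketch, not part of the generated snapshot: a serialized response
+ // payload (a hypothetical `json` string) can be rehydrated with the reflection-based FromJson()
+ // overload and its final status inspected via this enum:
+ //   var response = RealtimeResponse.FromJson(json);
+ //   if (response?.Status == RealtimeResponseStatus.Incomplete)
+ //   {
+ //       // e.g. max_output_tokens or content_filter; see StatusDetails for the reason
+ //   }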
+ [global::System.Text.Json.Serialization.JsonPropertyName("status")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeResponseStatusJsonConverter))] + public global::G.RealtimeResponseStatus? Status { get; set; } + + /// + /// Additional details about the status. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("status_details")] + public global::G.RealtimeResponseStatusDetails? StatusDetails { get; set; } + + /// + /// The list of output items generated by the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output")] + public global::System.Collections.Generic.IList? Output { get; set; } + + /// + /// Usage statistics for the Response, this will correspond to billing. A
+ /// Realtime API session will maintain a conversation context and append new
+ /// Items to the Conversation, thus output from previous turns (text and
+ /// audio tokens) will become the input for later turns. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("usage")] + public global::G.RealtimeResponseUsage? Usage { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the response. + /// + /// + /// The object type, must be `realtime.response`. + /// + /// + /// The final status of the response (`completed`, `cancelled`, `failed`, or
+ /// `incomplete`). + /// + /// + /// Additional details about the status. + /// + /// + /// The list of output items generated by the response. + /// + /// + /// Usage statistics for the Response, this will correspond to billing. A
+ /// Realtime API session will maintain a conversation context and append new
+ /// Items to the Conversation, thus output from previous turns (text and
+ /// audio tokens) will become the input for later turns. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeResponse( + string? id, + global::G.RealtimeResponseObject? @object, + global::G.RealtimeResponseStatus? status, + global::G.RealtimeResponseStatusDetails? statusDetails, + global::System.Collections.Generic.IList? output, + global::G.RealtimeResponseUsage? usage) + { + this.Id = id; + this.Object = @object; + this.Status = status; + this.StatusDetails = statusDetails; + this.Output = output; + this.Usage = usage; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponse() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseObject.g.verified.cs new file mode 100644 index 0000000000..0e54ad0ce5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeResponseObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// The object type, must be `realtime.response`. + /// + public enum RealtimeResponseObject + { + /// + /// + /// + RealtimeResponse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseObject value) + { + return value switch + { + RealtimeResponseObject.RealtimeResponse => "realtime.response", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseObject? ToEnum(string value) + { + return value switch + { + "realtime.response" => RealtimeResponseObject.RealtimeResponse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatus.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatus.g.verified.cs new file mode 100644 index 0000000000..4b8385daee --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatus.g.verified.cs @@ -0,0 +1,65 @@ +//HintName: G.Models.RealtimeResponseStatus.g.cs + +#nullable enable + +namespace G +{ + /// + /// The final status of the response (`completed`, `cancelled`, `failed`, or
+ /// `incomplete`). + ///
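+ // Editor's note — a usage sketch, not part of the generated snapshot: the generated extension
+ // methods below round-trip between this enum and the wire strings without reflection:
+ //   string wire = RealtimeResponseStatus.Cancelled.ToValueString();   // "cancelled"
+ //   RealtimeResponseStatus? parsed =
+ //       RealtimeResponseStatusExtensions.ToEnum("cancelled");         // RealtimeResponseStatus.Cancelled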
+ public enum RealtimeResponseStatus + { + /// + /// + /// + Completed, + /// + /// + /// + Cancelled, + /// + /// + /// + Failed, + /// + /// + /// + Incomplete, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseStatusExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseStatus value) + { + return value switch + { + RealtimeResponseStatus.Completed => "completed", + RealtimeResponseStatus.Cancelled => "cancelled", + RealtimeResponseStatus.Failed => "failed", + RealtimeResponseStatus.Incomplete => "incomplete", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseStatus? ToEnum(string value) + { + return value switch + { + "completed" => RealtimeResponseStatus.Completed, + "cancelled" => RealtimeResponseStatus.Cancelled, + "failed" => RealtimeResponseStatus.Failed, + "incomplete" => RealtimeResponseStatus.Incomplete, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetails.Json.g.verified.cs new file mode 100644 index 0000000000..09391052bd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetails.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeResponseStatusDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseStatusDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeResponseStatusDetails? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeResponseStatusDetails), + jsonSerializerContext) as global::G.RealtimeResponseStatusDetails; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseStatusDetails? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeResponseStatusDetails), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeResponseStatusDetails; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetails.g.verified.cs new file mode 100644 index 0000000000..f55c4b06e3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetails.g.verified.cs @@ -0,0 +1,80 @@ +//HintName: G.Models.RealtimeResponseStatusDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Additional details about the status. + /// + public sealed partial class RealtimeResponseStatusDetails + { + /// + /// The type of error that caused the response to fail, corresponding
+ /// with the `status` field (`cancelled`, `incomplete`, `failed`). + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeResponseStatusDetailsTypeJsonConverter))] + public global::G.RealtimeResponseStatusDetailsType? Type { get; set; } + + /// + /// The reason the Response did not complete. For a `cancelled` Response,
+ /// one of `turn_detected` (the server VAD detected a new start of speech)
+ /// or `client_cancelled` (the client sent a cancel event). For an
+ /// `incomplete` Response, one of `max_output_tokens` or `content_filter`
+ /// (the server-side safety filter activated and cut off the response). + ///
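+ // Editor's note — a usage sketch, not part of the generated snapshot: after a non-completed
+ // response (a `response` instance as in the earlier sketch), the reason and any error can be
+ // read from these properties:
+ //   if (response.StatusDetails?.Reason == RealtimeResponseStatusDetailsReason.ContentFilter)
+ //   {
+ //       // output was cut off by the server-side safety filter
+ //   }
+ //   var errorType = response.StatusDetails?.Error?.Type; // populated when status is "failed"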
+ [global::System.Text.Json.Serialization.JsonPropertyName("reason")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeResponseStatusDetailsReasonJsonConverter))] + public global::G.RealtimeResponseStatusDetailsReason? Reason { get; set; } + + /// + /// A description of the error that caused the response to fail,
+ /// populated when the `status` is `failed`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("error")] + public global::G.RealtimeResponseStatusDetailsError? Error { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of error that caused the response to fail, corresponding
+ /// with the `status` field (`cancelled`, `incomplete`, `failed`). + /// + /// + /// The reason the Response did not complete. For a `cancelled` Response,
+ /// one of `turn_detected` (the server VAD detected a new start of speech)
+ /// or `client_cancelled` (the client sent a cancel event). For an
+ /// `incomplete` Response, one of `max_output_tokens` or `content_filter`
+ /// (the server-side safety filter activated and cut off the response). + /// + /// + /// A description of the error that caused the response to fail,
+ /// populated when the `status` is `failed`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeResponseStatusDetails( + global::G.RealtimeResponseStatusDetailsType? type, + global::G.RealtimeResponseStatusDetailsReason? reason, + global::G.RealtimeResponseStatusDetailsError? error) + { + this.Type = type; + this.Reason = reason; + this.Error = error; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseStatusDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsError.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsError.Json.g.verified.cs new file mode 100644 index 0000000000..f0d0717ebc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsError.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeResponseStatusDetailsError.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseStatusDetailsError + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeResponseStatusDetailsError? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeResponseStatusDetailsError), + jsonSerializerContext) as global::G.RealtimeResponseStatusDetailsError; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseStatusDetailsError? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeResponseStatusDetailsError), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeResponseStatusDetailsError; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsError.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsError.g.verified.cs new file mode 100644 index 0000000000..72f73786ad --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsError.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RealtimeResponseStatusDetailsError.g.cs + +#nullable enable + +namespace G +{ + /// + /// A description of the error that caused the response to fail,
+ /// populated when the `status` is `failed`. + ///
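A minimal consumption sketch for this error model (illustrative only; it assumes the generated `G` namespace is referenced, uses the `FromJson` helper from the Json partial above, and the field values in the JSON literal are made up):

using System;
using G;

// Illustrative payload only; concrete `type`/`code` values are not defined in this hunk.
string json = "{\"type\":\"server_error\",\"code\":\"internal_error\"}";

// FromJson is the generated helper shown in the Json.g snapshot above.
RealtimeResponseStatusDetailsError? error = RealtimeResponseStatusDetailsError.FromJson(json);
Console.WriteLine($"error type: {error?.Type}, code: {error?.Code}");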
+ public sealed partial class RealtimeResponseStatusDetailsError + { + /// + /// The type of error. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Error code, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("code")] + public string? Code { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of error. + /// + /// + /// Error code, if any. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeResponseStatusDetailsError( + string? type, + string? code) + { + this.Type = type; + this.Code = code; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseStatusDetailsError() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsReason.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsReason.g.verified.cs new file mode 100644 index 0000000000..92909a87f4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsReason.g.verified.cs @@ -0,0 +1,68 @@ +//HintName: G.Models.RealtimeResponseStatusDetailsReason.g.cs + +#nullable enable + +namespace G +{ + /// + /// The reason the Response did not complete. For a `cancelled` Response,
+ /// one of `turn_detected` (the server VAD detected a new start of speech)
+ /// or `client_cancelled` (the client sent a cancel event). For an
+ /// `incomplete` Response, one of `max_output_tokens` or `content_filter`
+ /// (the server-side safety filter activated and cut off the response). + ///
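A short round-trip sketch for these reason values (illustrative only; it relies on the `ToValueString`/`ToEnum` extension methods generated just below and assumes the `G` namespace is referenced):

using System;
using G;

// Wire value -> enum; unknown strings map to null rather than throwing.
RealtimeResponseStatusDetailsReason? reason =
    RealtimeResponseStatusDetailsReasonExtensions.ToEnum("turn_detected");

// Enum -> wire value for the non-null case.
Console.WriteLine(reason?.ToValueString()); // prints "turn_detected"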
+ public enum RealtimeResponseStatusDetailsReason + { + /// + /// + /// + TurnDetected, + /// + /// + /// + ClientCancelled, + /// + /// + /// + MaxOutputTokens, + /// + /// + /// + ContentFilter, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseStatusDetailsReasonExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseStatusDetailsReason value) + { + return value switch + { + RealtimeResponseStatusDetailsReason.TurnDetected => "turn_detected", + RealtimeResponseStatusDetailsReason.ClientCancelled => "client_cancelled", + RealtimeResponseStatusDetailsReason.MaxOutputTokens => "max_output_tokens", + RealtimeResponseStatusDetailsReason.ContentFilter => "content_filter", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseStatusDetailsReason? ToEnum(string value) + { + return value switch + { + "turn_detected" => RealtimeResponseStatusDetailsReason.TurnDetected, + "client_cancelled" => RealtimeResponseStatusDetailsReason.ClientCancelled, + "max_output_tokens" => RealtimeResponseStatusDetailsReason.MaxOutputTokens, + "content_filter" => RealtimeResponseStatusDetailsReason.ContentFilter, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsType.g.verified.cs new file mode 100644 index 0000000000..865edc0dee --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseStatusDetailsType.g.verified.cs @@ -0,0 +1,65 @@ +//HintName: G.Models.RealtimeResponseStatusDetailsType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of error that caused the response to fail, corresponding
+ /// with the `status` field (`cancelled`, `incomplete`, `failed`). + ///
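A small sketch of branching on the parsed status type (illustrative only; it uses the extension methods generated below for this enum, and the hint strings are placeholders):

using System;
using G;

RealtimeResponseStatusDetailsType? status =
    RealtimeResponseStatusDetailsTypeExtensions.ToEnum("incomplete");

string hint = status switch
{
    RealtimeResponseStatusDetailsType.Completed => "finished normally",
    RealtimeResponseStatusDetailsType.Cancelled => "see Reason (turn_detected / client_cancelled)",
    RealtimeResponseStatusDetailsType.Failed => "see the Error details",
    RealtimeResponseStatusDetailsType.Incomplete => "see Reason (max_output_tokens / content_filter)",
    _ => "unrecognized status", // also covers null for unknown wire values
};
Console.WriteLine(hint);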
+ public enum RealtimeResponseStatusDetailsType + { + /// + /// + /// + Completed, + /// + /// + /// + Cancelled, + /// + /// + /// + Failed, + /// + /// + /// + Incomplete, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeResponseStatusDetailsTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeResponseStatusDetailsType value) + { + return value switch + { + RealtimeResponseStatusDetailsType.Completed => "completed", + RealtimeResponseStatusDetailsType.Cancelled => "cancelled", + RealtimeResponseStatusDetailsType.Failed => "failed", + RealtimeResponseStatusDetailsType.Incomplete => "incomplete", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeResponseStatusDetailsType? ToEnum(string value) + { + return value switch + { + "completed" => RealtimeResponseStatusDetailsType.Completed, + "cancelled" => RealtimeResponseStatusDetailsType.Cancelled, + "failed" => RealtimeResponseStatusDetailsType.Failed, + "incomplete" => RealtimeResponseStatusDetailsType.Incomplete, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsage.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsage.Json.g.verified.cs new file mode 100644 index 0000000000..8809b1a2bd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsage.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeResponseUsage.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseUsage + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeResponseUsage? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeResponseUsage), + jsonSerializerContext) as global::G.RealtimeResponseUsage; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseUsage? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeResponseUsage), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeResponseUsage; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsage.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsage.g.verified.cs new file mode 100644 index 0000000000..99e6b492ea --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsage.g.verified.cs @@ -0,0 +1,97 @@ +//HintName: G.Models.RealtimeResponseUsage.g.cs + +#nullable enable + +namespace G +{ + /// + /// Usage statistics for the Response, this will correspond to billing. A
+ /// Realtime API session will maintain a conversation context and append new
+ /// Items to the Conversation, so output from previous turns (text and
+ /// audio tokens) will become the input for later turns. + ///
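A minimal sketch of reading a usage block (illustrative only; the JSON literal uses made-up numbers but follows the property names generated below, and `FromJson` comes from the Json partial above):

using System;
using G;

// Made-up numbers, shaped like the usage object of a completed response.
string json =
    "{\"total_tokens\":55,\"input_tokens\":30,\"output_tokens\":25," +
    "\"input_token_details\":{\"cached_tokens\":0,\"text_tokens\":20,\"audio_tokens\":10}," +
    "\"output_token_details\":{\"text_tokens\":15,\"audio_tokens\":10}}";

RealtimeResponseUsage? usage = RealtimeResponseUsage.FromJson(json);
Console.WriteLine($"total={usage?.TotalTokens} input={usage?.InputTokens} output={usage?.OutputTokens}");
Console.WriteLine($"output split: text={usage?.OutputTokenDetails?.TextTokens} audio={usage?.OutputTokenDetails?.AudioTokens}");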
+ public sealed partial class RealtimeResponseUsage + { + /// + /// The total number of tokens in the Response including input and output
+ /// text and audio tokens. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("total_tokens")] + public int? TotalTokens { get; set; } + + /// + /// The number of input tokens used in the Response, including text and
+ /// audio tokens. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("input_tokens")] + public int? InputTokens { get; set; } + + /// + /// The number of output tokens sent in the Response, including text and
+ /// audio tokens. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("output_tokens")] + public int? OutputTokens { get; set; } + + /// + /// Details about the input tokens used in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_token_details")] + public global::G.RealtimeResponseUsageInputTokenDetails? InputTokenDetails { get; set; } + + /// + /// Details about the output tokens used in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_token_details")] + public global::G.RealtimeResponseUsageOutputTokenDetails? OutputTokenDetails { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The total number of tokens in the Response including input and output
+ /// text and audio tokens. + /// + /// + /// The number of input tokens used in the Response, including text and
+ /// audio tokens. + /// + /// + /// The number of output tokens sent in the Response, including text and
+ /// audio tokens. + /// + /// + /// Details about the input tokens used in the Response. + /// + /// + /// Details about the output tokens used in the Response. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeResponseUsage( + int? totalTokens, + int? inputTokens, + int? outputTokens, + global::G.RealtimeResponseUsageInputTokenDetails? inputTokenDetails, + global::G.RealtimeResponseUsageOutputTokenDetails? outputTokenDetails) + { + this.TotalTokens = totalTokens; + this.InputTokens = inputTokens; + this.OutputTokens = outputTokens; + this.InputTokenDetails = inputTokenDetails; + this.OutputTokenDetails = outputTokenDetails; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseUsage() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.Json.g.verified.cs new file mode 100644 index 0000000000..019814dd27 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeResponseUsageInputTokenDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseUsageInputTokenDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeResponseUsageInputTokenDetails? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeResponseUsageInputTokenDetails), + jsonSerializerContext) as global::G.RealtimeResponseUsageInputTokenDetails; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseUsageInputTokenDetails? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeResponseUsageInputTokenDetails), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeResponseUsageInputTokenDetails; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.g.verified.cs new file mode 100644 index 0000000000..793bc0db07 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageInputTokenDetails.g.verified.cs @@ -0,0 +1,66 @@ +//HintName: G.Models.RealtimeResponseUsageInputTokenDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Details about the input tokens used in the Response. + /// + public sealed partial class RealtimeResponseUsageInputTokenDetails + { + /// + /// The number of cached tokens used in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("cached_tokens")] + public int? CachedTokens { get; set; } + + /// + /// The number of text tokens used in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text_tokens")] + public int? TextTokens { get; set; } + + /// + /// The number of audio tokens used in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_tokens")] + public int? 
AudioTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The number of cached tokens used in the Response. + /// + /// + /// The number of text tokens used in the Response. + /// + /// + /// The number of audio tokens used in the Response. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeResponseUsageInputTokenDetails( + int? cachedTokens, + int? textTokens, + int? audioTokens) + { + this.CachedTokens = cachedTokens; + this.TextTokens = textTokens; + this.AudioTokens = audioTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseUsageInputTokenDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.Json.g.verified.cs new file mode 100644 index 0000000000..5f0160a965 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeResponseUsageOutputTokenDetails.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeResponseUsageOutputTokenDetails + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeResponseUsageOutputTokenDetails? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeResponseUsageOutputTokenDetails), + jsonSerializerContext) as global::G.RealtimeResponseUsageOutputTokenDetails; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeResponseUsageOutputTokenDetails? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeResponseUsageOutputTokenDetails), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeResponseUsageOutputTokenDetails; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.g.verified.cs new file mode 100644 index 0000000000..472bcde148 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeResponseUsageOutputTokenDetails.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.RealtimeResponseUsageOutputTokenDetails.g.cs + +#nullable enable + +namespace G +{ + /// + /// Details about the output tokens used in the Response. + /// + public sealed partial class RealtimeResponseUsageOutputTokenDetails + { + /// + /// The number of text tokens used in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text_tokens")] + public int? TextTokens { get; set; } + + /// + /// The number of audio tokens used in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_tokens")] + public int? 
AudioTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The number of text tokens used in the Response. + /// + /// + /// The number of audio tokens used in the Response. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeResponseUsageOutputTokenDetails( + int? textTokens, + int? audioTokens) + { + this.TextTokens = textTokens; + this.AudioTokens = audioTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeResponseUsageOutputTokenDetails() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreated.Json.g.verified.cs new file mode 100644 index 0000000000..21adc66efd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreated.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventConversationCreated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationCreated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventConversationCreated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventConversationCreated), + jsonSerializerContext) as global::G.RealtimeServerEventConversationCreated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationCreated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventConversationCreated), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventConversationCreated; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreated.g.verified.cs new file mode 100644 index 0000000000..d68a254939 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreated.g.verified.cs @@ -0,0 +1,69 @@ +//HintName: G.Models.RealtimeServerEventConversationCreated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a conversation is created. Emitted right after session creation. + /// + public sealed partial class RealtimeServerEventConversationCreated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `conversation.created`. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventConversationCreatedTypeJsonConverter))] + public global::G.RealtimeServerEventConversationCreatedType Type { get; set; } + + /// + /// The conversation resource. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("conversation")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeServerEventConversationCreatedConversation Conversation { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `conversation.created`. + /// + /// + /// The conversation resource. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventConversationCreated( + string eventId, + global::G.RealtimeServerEventConversationCreatedConversation conversation, + global::G.RealtimeServerEventConversationCreatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Conversation = conversation ?? throw new global::System.ArgumentNullException(nameof(conversation)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationCreated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.Json.g.verified.cs new file mode 100644 index 0000000000..0924b5a48f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventConversationCreatedConversation.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationCreatedConversation + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventConversationCreatedConversation? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventConversationCreatedConversation), + jsonSerializerContext) as global::G.RealtimeServerEventConversationCreatedConversation; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationCreatedConversation? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventConversationCreatedConversation), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventConversationCreatedConversation; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.g.verified.cs new file mode 100644 index 0000000000..d2aca5b8a0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedConversation.g.verified.cs @@ -0,0 +1,55 @@ +//HintName: G.Models.RealtimeServerEventConversationCreatedConversation.g.cs + +#nullable enable + +namespace G +{ + /// + /// The conversation resource. + /// + public sealed partial class RealtimeServerEventConversationCreatedConversation + { + /// + /// The unique ID of the conversation. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("id")] + public string? Id { get; set; } + + /// + /// The object type, must be `realtime.conversation`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + public string? Object { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the conversation. + /// + /// + /// The object type, must be `realtime.conversation`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventConversationCreatedConversation( + string? id, + string? @object) + { + this.Id = id; + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationCreatedConversation() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedType.g.verified.cs new file mode 100644 index 0000000000..c94511bf6c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationCreatedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventConversationCreatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.created`. + /// + public enum RealtimeServerEventConversationCreatedType + { + /// + /// + /// + ConversationCreated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationCreatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationCreatedType value) + { + return value switch + { + RealtimeServerEventConversationCreatedType.ConversationCreated => "conversation.created", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationCreatedType? 
ToEnum(string value) + { + return value switch + { + "conversation.created" => RealtimeServerEventConversationCreatedType.ConversationCreated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreated.Json.g.verified.cs new file mode 100644 index 0000000000..74bc14fded --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreated.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventConversationItemCreated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemCreated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventConversationItemCreated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventConversationItemCreated), + jsonSerializerContext) as global::G.RealtimeServerEventConversationItemCreated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemCreated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventConversationItemCreated), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventConversationItemCreated; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreated.g.verified.cs new file mode 100644 index 0000000000..ca683d6de9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreated.g.verified.cs @@ -0,0 +1,92 @@ +//HintName: G.Models.RealtimeServerEventConversationItemCreated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a conversation item is created. There are several scenarios that
+ /// produce this event:
+ /// - The server is generating a Response, which if successful will produce
+ /// either one or two Items, which will be of type `message`
+ /// (role `assistant`) or type `function_call`.
+ /// - The input audio buffer has been committed, either by the client or the
+ /// server (in `server_vad` mode). The server will take the content of the
+ /// input audio buffer and add it to a new user message Item.
+ /// - The client has sent a `conversation.item.create` event to add a new Item
+ /// to the Conversation. + ///
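A hedged handling sketch for this event (illustrative only; `rawMessage` stands in for the text of one received websocket message, and the class generated below is assumed to be available from the `G` namespace):

using System;
using G;

internal static class ConversationCreatedHandler
{
    // Call with a message whose "type" is already known to be "conversation.item.created".
    public static void Handle(string rawMessage)
    {
        RealtimeServerEventConversationItemCreated? evt =
            RealtimeServerEventConversationItemCreated.FromJson(rawMessage);
        if (evt is null) return;

        // previous_item_id tells the client where the server placed the new item,
        // which is enough to keep a locally ordered view of the conversation.
        Console.WriteLine($"event {evt.EventId}: item added after '{evt.PreviousItemId}'");
    }
}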
+ public sealed partial class RealtimeServerEventConversationItemCreated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `conversation.item.created`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventConversationItemCreatedTypeJsonConverter))] + public global::G.RealtimeServerEventConversationItemCreatedType Type { get; set; } + + /// + /// The ID of the preceding item in the Conversation context, allows the
+ /// client to understand the order of the conversation. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("previous_item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string PreviousItemId { get; set; } + + /// + /// The item to add to the conversation. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeConversationItem Item { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `conversation.item.created`. + /// + /// + /// The ID of the preceding item in the Conversation context, allows the
+ /// client to understand the order of the conversation. + /// + /// + /// The item to add to the conversation. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventConversationItemCreated( + string eventId, + string previousItemId, + global::G.RealtimeConversationItem item, + global::G.RealtimeServerEventConversationItemCreatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.PreviousItemId = previousItemId ?? throw new global::System.ArgumentNullException(nameof(previousItemId)); + this.Item = item ?? throw new global::System.ArgumentNullException(nameof(item)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemCreated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreatedType.g.verified.cs new file mode 100644 index 0000000000..6416a35092 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemCreatedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventConversationItemCreatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.created`. + /// + public enum RealtimeServerEventConversationItemCreatedType + { + /// + /// + /// + ConversationItemCreated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemCreatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemCreatedType value) + { + return value switch + { + RealtimeServerEventConversationItemCreatedType.ConversationItemCreated => "conversation.item.created", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemCreatedType? ToEnum(string value) + { + return value switch + { + "conversation.item.created" => RealtimeServerEventConversationItemCreatedType.ConversationItemCreated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeleted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeleted.Json.g.verified.cs new file mode 100644 index 0000000000..391b93c052 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeleted.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventConversationItemDeleted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemDeleted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventConversationItemDeleted? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventConversationItemDeleted), + jsonSerializerContext) as global::G.RealtimeServerEventConversationItemDeleted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemDeleted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventConversationItemDeleted), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventConversationItemDeleted; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeleted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeleted.g.verified.cs new file mode 100644 index 0000000000..8e3ea8954d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeleted.g.verified.cs @@ -0,0 +1,71 @@ +//HintName: G.Models.RealtimeServerEventConversationItemDeleted.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an item in the conversation is deleted by the client with a
+ /// `conversation.item.delete` event. This event is used to synchronize the
+ /// server's understanding of the conversation history with the client's view. + ///
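// Illustrative sketch (not part of the generated snapshot): one way a client might
// consume this event, using the FromJson helper from the matching *.Json.g partial
// earlier in this diff and the properties declared just below. Assumes the generated
// G.JsonConverters from the rest of this snapshot set are available; the payload
// string is hypothetical.
using G;

public static class ConversationItemDeletedExample
{
    public static void Handle(string json)
    {
        // e.g. {"event_id":"event_2728","type":"conversation.item.deleted","item_id":"msg_005"}
        var deleted = RealtimeServerEventConversationItemDeleted.FromJson(json);
        if (deleted is null) return;

        // Remove the item from the local copy of the conversation history here.
        System.Console.WriteLine(
            $"Server event {deleted.EventId}: item {deleted.ItemId} deleted " +
            $"({deleted.Type.ToValueString()}).");
    }
}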
+ public sealed partial class RealtimeServerEventConversationItemDeleted + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `conversation.item.deleted`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventConversationItemDeletedTypeJsonConverter))] + public global::G.RealtimeServerEventConversationItemDeletedType Type { get; set; } + + /// + /// The ID of the item that was deleted. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `conversation.item.deleted`. + /// + /// + /// The ID of the item that was deleted. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventConversationItemDeleted( + string eventId, + string itemId, + global::G.RealtimeServerEventConversationItemDeletedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemDeleted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeletedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeletedType.g.verified.cs new file mode 100644 index 0000000000..b2175219ba --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemDeletedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventConversationItemDeletedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.deleted`. + /// + public enum RealtimeServerEventConversationItemDeletedType + { + /// + /// + /// + ConversationItemDeleted, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemDeletedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemDeletedType value) + { + return value switch + { + RealtimeServerEventConversationItemDeletedType.ConversationItemDeleted => "conversation.item.deleted", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemDeletedType? 
ToEnum(string value) + { + return value switch + { + "conversation.item.deleted" => RealtimeServerEventConversationItemDeletedType.ConversationItemDeleted, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.Json.g.verified.cs new file mode 100644 index 0000000000..d4918d69ec --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted), + jsonSerializerContext) as global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.verified.cs new file mode 100644 index 0000000000..fbd2bf4e34 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.verified.cs @@ -0,0 +1,103 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.g.cs + +#nullable enable + +namespace G +{ + /// + /// This event is the output of audio transcription for user audio written to the
+ /// user audio buffer. Transcription begins when the input audio buffer is
+ /// committed by the client or server (in `server_vad` mode). Transcription runs
+ /// asynchronously with Response creation, so this event may come before or after
+ /// the Response events.
+ /// Realtime API models accept audio natively, and thus input transcription is a
+ /// separate process run on a separate ASR (Automatic Speech Recognition) model,
+ /// currently always `whisper-1`. Thus the transcript may diverge somewhat from
+ /// the model's interpretation, and should be treated as a rough guide. + ///
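// Illustrative sketch (not part of the generated snapshot): handling a completed
// input-audio transcription with the FromJson helper from the matching *.Json.g
// partial above and the properties declared just below. Per the summary above, the
// transcript comes from a separate ASR pass and is only a rough guide, so it is
// surfaced for logging/UI rather than treated as ground truth; the JSON is hypothetical.
using G;

public static class TranscriptionCompletedExample
{
    public static void Handle(string json)
    {
        var completed =
            RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.FromJson(json);
        if (completed is null) return;

        // Attach the approximate transcript to the matching user message item.
        System.Console.WriteLine(
            $"Item {completed.ItemId}, content part {completed.ContentIndex}: " +
            $"\"{completed.Transcript}\"");
    }
}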
+ public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.completed`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeJsonConverter))] + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType Type { get; set; } + + /// + /// The ID of the user message item containing the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the content part containing the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The transcribed text. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.completed`. + /// + /// + /// The ID of the user message item containing the audio. + /// + /// + /// The index of the content part containing the audio. + /// + /// + /// The transcribed text. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventConversationItemInputAudioTranscriptionCompleted( + string eventId, + string itemId, + int contentIndex, + string transcript, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.ContentIndex = contentIndex; + this.Transcript = transcript ?? throw new global::System.ArgumentNullException(nameof(transcript)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemInputAudioTranscriptionCompleted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs new file mode 100644 index 0000000000..1d9f8ff45e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs @@ -0,0 +1,47 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.completed`. + ///
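// Illustrative sketch (not part of the generated snapshot): the enum declared just
// below has a single member, but its generated helpers are still the reflection-free
// way to map between the wire string and the enum, e.g. when routing raw server
// events by their "type" field before full deserialization.
using G;

public static class CompletedTypeExample
{
    public static bool IsTranscriptionCompleted(string wireType)
    {
        // ToEnum is a plain static helper and returns null for unknown strings.
        var parsed = RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeExtensions
            .ToEnum(wireType);
        return parsed == RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType
            .ConversationItemInputAudioTranscriptionCompleted;
    }

    // ToValueString is the reverse mapping:
    // returns "conversation.item.input_audio_transcription.completed".
    public static string WireValue() =>
        RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType
            .ConversationItemInputAudioTranscriptionCompleted
            .ToValueString();
}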
+ public enum RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType + { + /// + /// + /// + ConversationItemInputAudioTranscriptionCompleted, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType value) + { + return value switch + { + RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.ConversationItemInputAudioTranscriptionCompleted => "conversation.item.input_audio_transcription.completed", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType? ToEnum(string value) + { + return value switch + { + "conversation.item.input_audio_transcription.completed" => RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.ConversationItemInputAudioTranscriptionCompleted, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.Json.g.verified.cs new file mode 100644 index 0000000000..fac2ed8318 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailed + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed), + jsonSerializerContext) as global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.verified.cs new file mode 100644 index 0000000000..eb4ebeefcc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.verified.cs @@ -0,0 +1,97 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when input audio transcription is configured, and a transcription
+ /// request for a user message failed. These events are separate from other
+ /// `error` events so that the client can identify the related Item. + ///
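// Illustrative sketch (not part of the generated snapshot): because this event
// carries the related item_id (unlike the generic `error` event, as the summary
// above notes), a client can log the failure against the specific user message.
// Uses only members declared in this file and in the nested ...FailedError model
// further below; the JSON payload is hypothetical.
using G;

public static class TranscriptionFailedExample
{
    public static void Handle(string json)
    {
        var failed =
            RealtimeServerEventConversationItemInputAudioTranscriptionFailed.FromJson(json);
        if (failed is null) return;

        System.Console.Error.WriteLine(
            $"Transcription failed for item {failed.ItemId} " +
            $"(content part {failed.ContentIndex}): " +
            $"{failed.Error.Type}/{failed.Error.Code}: {failed.Error.Message}");
    }
}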
+ public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailed + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.failed`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeJsonConverter))] + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType Type { get; set; } + + /// + /// The ID of the user message item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the content part containing the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// Details of the transcription error. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("error")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError Error { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.failed`. + /// + /// + /// The ID of the user message item. + /// + /// + /// The index of the content part containing the audio. + /// + /// + /// Details of the transcription error. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventConversationItemInputAudioTranscriptionFailed( + string eventId, + string itemId, + int contentIndex, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError error, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.ContentIndex = contentIndex; + this.Error = error ?? throw new global::System.ArgumentNullException(nameof(error)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemInputAudioTranscriptionFailed() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.Json.g.verified.cs new file mode 100644 index 0000000000..df56de48e1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailedError + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError), + jsonSerializerContext) as global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.verified.cs new file mode 100644 index 0000000000..c12a123e73 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.verified.cs @@ -0,0 +1,77 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.g.cs + +#nullable enable + +namespace G +{ + /// + /// Details of the transcription error. + /// + public sealed partial class RealtimeServerEventConversationItemInputAudioTranscriptionFailedError + { + /// + /// The type of error. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Error code, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("code")] + public string? Code { get; set; } + + /// + /// A human-readable error message. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("message")] + public string? Message { get; set; } + + /// + /// Parameter related to the error, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("param")] + public string? Param { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of error. + /// + /// + /// Error code, if any. + /// + /// + /// A human-readable error message. + /// + /// + /// Parameter related to the error, if any. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventConversationItemInputAudioTranscriptionFailedError( + string? type, + string? code, + string? message, + string? param) + { + this.Type = type; + this.Code = code; + this.Message = message; + this.Param = param; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventConversationItemInputAudioTranscriptionFailedError() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs new file mode 100644 index 0000000000..c368f3b3f0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs @@ -0,0 +1,47 @@ +//HintName: G.Models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be
+ /// `conversation.item.input_audio_transcription.failed`. + ///
+ public enum RealtimeServerEventConversationItemInputAudioTranscriptionFailedType + { + /// + /// + /// + ConversationItemInputAudioTranscriptionFailed, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemInputAudioTranscriptionFailedType value) + { + return value switch + { + RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.ConversationItemInputAudioTranscriptionFailed => "conversation.item.input_audio_transcription.failed", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemInputAudioTranscriptionFailedType? ToEnum(string value) + { + return value switch + { + "conversation.item.input_audio_transcription.failed" => RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.ConversationItemInputAudioTranscriptionFailed, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncated.Json.g.verified.cs new file mode 100644 index 0000000000..d71b322ecd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncated.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventConversationItemTruncated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventConversationItemTruncated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventConversationItemTruncated? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventConversationItemTruncated), + jsonSerializerContext) as global::G.RealtimeServerEventConversationItemTruncated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventConversationItemTruncated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventConversationItemTruncated), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventConversationItemTruncated; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncated.g.verified.cs new file mode 100644 index 0000000000..a583b3f4a1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncated.g.verified.cs @@ -0,0 +1,97 @@ +//HintName: G.Models.RealtimeServerEventConversationItemTruncated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an earlier assistant audio message item is truncated by the
+ /// client with a `conversation.item.truncate` event. This event is used to
+ /// synchronize the server's understanding of the audio with the client's playback.
+ /// This action will truncate the audio and remove the server-side text transcript
+ /// to ensure there is no text in the context that hasn't been heard by the user. + ///
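// Illustrative sketch (not part of the generated snapshot): mirroring the
// truncation locally so that no audio or transcript the user never heard survives
// on the client either. trimLocalAudio is a hypothetical callback standing in for
// whatever playback/transcript buffer the application keeps.
using System;
using G;

public static class ConversationItemTruncatedExample
{
    public static void Handle(string json, Action<string, int, int> trimLocalAudio)
    {
        var truncated = RealtimeServerEventConversationItemTruncated.FromJson(json);
        if (truncated is null) return;

        // Keep only audio up to audio_end_ms for the truncated content part.
        trimLocalAudio(truncated.ItemId, truncated.ContentIndex, truncated.AudioEndMs);
    }
}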
+ public sealed partial class RealtimeServerEventConversationItemTruncated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `conversation.item.truncated`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventConversationItemTruncatedTypeJsonConverter))] + public global::G.RealtimeServerEventConversationItemTruncatedType Type { get; set; } + + /// + /// The ID of the assistant message item that was truncated. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the content part that was truncated. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The duration up to which the audio was truncated, in milliseconds. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio_end_ms")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int AudioEndMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `conversation.item.truncated`. + /// + /// + /// The ID of the assistant message item that was truncated. + /// + /// + /// The index of the content part that was truncated. + /// + /// + /// The duration up to which the audio was truncated, in milliseconds. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventConversationItemTruncated( + string eventId, + string itemId, + int contentIndex, + int audioEndMs, + global::G.RealtimeServerEventConversationItemTruncatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.ContentIndex = contentIndex; + this.AudioEndMs = audioEndMs; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventConversationItemTruncated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncatedType.g.verified.cs new file mode 100644 index 0000000000..3ff4386734 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventConversationItemTruncatedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventConversationItemTruncatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `conversation.item.truncated`. + /// + public enum RealtimeServerEventConversationItemTruncatedType + { + /// + /// + /// + ConversationItemTruncated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventConversationItemTruncatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventConversationItemTruncatedType value) + { + return value switch + { + RealtimeServerEventConversationItemTruncatedType.ConversationItemTruncated => "conversation.item.truncated", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventConversationItemTruncatedType? ToEnum(string value) + { + return value switch + { + "conversation.item.truncated" => RealtimeServerEventConversationItemTruncatedType.ConversationItemTruncated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventError.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventError.Json.g.verified.cs new file mode 100644 index 0000000000..70e3dfd4b7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventError.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventError.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventError + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventError? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventError), + jsonSerializerContext) as global::G.RealtimeServerEventError; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventError? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventError), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventError; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventError.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventError.g.verified.cs new file mode 100644 index 0000000000..ab64e9843e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventError.g.verified.cs @@ -0,0 +1,71 @@ +//HintName: G.Models.RealtimeServerEventError.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an error occurs, which could be a client problem or a server
+ /// problem. Most errors are recoverable and the session will stay open; we
+ /// recommend that implementors monitor and log error messages by default. + ///
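// Illustrative sketch (not part of the generated snapshot): a minimal monitoring
// hook along the lines recommended above. All fields of the nested error object
// are optional strings in this schema; Console output stands in for whatever
// logging the application uses, and no reconnect is attempted because the session
// normally stays open after an error.
using G;

public static class RealtimeErrorExample
{
    public static void Handle(string json)
    {
        var errorEvent = RealtimeServerEventError.FromJson(json);
        if (errorEvent is null) return;

        var e = errorEvent.Error;
        System.Console.Error.WriteLine(
            $"Realtime error (event {errorEvent.EventId}): type={e.Type} code={e.Code} " +
            $"param={e.Param} caused_by={e.EventId} message={e.Message}");
    }
}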
+ public sealed partial class RealtimeServerEventError + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `error`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventErrorTypeJsonConverter))] + public global::G.RealtimeServerEventErrorType Type { get; set; } + + /// + /// Details of the error. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("error")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeServerEventErrorError Error { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `error`. + /// + /// + /// Details of the error. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventError( + string eventId, + global::G.RealtimeServerEventErrorError error, + global::G.RealtimeServerEventErrorType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Error = error ?? throw new global::System.ArgumentNullException(nameof(error)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventError() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorError.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorError.Json.g.verified.cs new file mode 100644 index 0000000000..72a8ffd177 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorError.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventErrorError.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventErrorError + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventErrorError? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventErrorError), + jsonSerializerContext) as global::G.RealtimeServerEventErrorError; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventErrorError? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventErrorError), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventErrorError; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorError.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorError.g.verified.cs new file mode 100644 index 0000000000..6846620a2a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorError.g.verified.cs @@ -0,0 +1,88 @@ +//HintName: G.Models.RealtimeServerEventErrorError.g.cs + +#nullable enable + +namespace G +{ + /// + /// Details of the error. + /// + public sealed partial class RealtimeServerEventErrorError + { + /// + /// The type of error (e.g., "invalid_request_error", "server_error"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Error code, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("code")] + public string? Code { get; set; } + + /// + /// A human-readable error message. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("message")] + public string? Message { get; set; } + + /// + /// Parameter related to the error, if any. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("param")] + public string? Param { get; set; } + + /// + /// The event_id of the client event that caused the error, if applicable. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + public string? EventId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of error (e.g., "invalid_request_error", "server_error"). + /// + /// + /// Error code, if any. + /// + /// + /// A human-readable error message. + /// + /// + /// Parameter related to the error, if any. + /// + /// + /// The event_id of the client event that caused the error, if applicable. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventErrorError( + string? type, + string? code, + string? message, + string? param, + string? eventId) + { + this.Type = type; + this.Code = code; + this.Message = message; + this.Param = param; + this.EventId = eventId; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventErrorError() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorType.g.verified.cs new file mode 100644 index 0000000000..f7ec0ce9c7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventErrorType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventErrorType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `error`. 
+ /// + public enum RealtimeServerEventErrorType + { + /// + /// + /// + Error, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventErrorTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventErrorType value) + { + return value switch + { + RealtimeServerEventErrorType.Error => "error", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventErrorType? ToEnum(string value) + { + return value switch + { + "error" => RealtimeServerEventErrorType.Error, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.Json.g.verified.cs new file mode 100644 index 0000000000..bd9fab2be6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCleared.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventInputAudioBufferCleared + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventInputAudioBufferCleared? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventInputAudioBufferCleared), + jsonSerializerContext) as global::G.RealtimeServerEventInputAudioBufferCleared; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventInputAudioBufferCleared? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventInputAudioBufferCleared), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventInputAudioBufferCleared; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.g.verified.cs new file mode 100644 index 0000000000..20fd58d60c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCleared.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCleared.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the input audio buffer is cleared by the client with a
+ /// `input_audio_buffer.clear` event. + ///
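+ // Illustrative usage sketch, not part of the generated snapshot: it assumes the
+ // reflection-based FromJson overload from the companion *.Json.g.cs partial above
+ // is acceptable for the app's trimming/AOT settings; "event_1121" is a placeholder.
+ //
+ //     var json = """{"event_id":"event_1121","type":"input_audio_buffer.cleared"}""";
+ //     var cleared = RealtimeServerEventInputAudioBufferCleared.FromJson(json);
+ //     // cleared?.Type.ToValueString() == "input_audio_buffer.cleared"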
+ public sealed partial class RealtimeServerEventInputAudioBufferCleared + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.cleared`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferClearedTypeJsonConverter))] + public global::G.RealtimeServerEventInputAudioBufferClearedType Type { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `input_audio_buffer.cleared`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventInputAudioBufferCleared( + string eventId, + global::G.RealtimeServerEventInputAudioBufferClearedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventInputAudioBufferCleared() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs new file mode 100644 index 0000000000..15d00ee251 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferClearedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.cleared`. + /// + public enum RealtimeServerEventInputAudioBufferClearedType + { + /// + /// + /// + InputAudioBufferCleared, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventInputAudioBufferClearedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventInputAudioBufferClearedType value) + { + return value switch + { + RealtimeServerEventInputAudioBufferClearedType.InputAudioBufferCleared => "input_audio_buffer.cleared", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventInputAudioBufferClearedType? 
ToEnum(string value) + { + return value switch + { + "input_audio_buffer.cleared" => RealtimeServerEventInputAudioBufferClearedType.InputAudioBufferCleared, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.Json.g.verified.cs new file mode 100644 index 0000000000..802ced17a4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCommitted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventInputAudioBufferCommitted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventInputAudioBufferCommitted? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventInputAudioBufferCommitted), + jsonSerializerContext) as global::G.RealtimeServerEventInputAudioBufferCommitted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventInputAudioBufferCommitted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventInputAudioBufferCommitted), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventInputAudioBufferCommitted; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.g.verified.cs new file mode 100644 index 0000000000..0086cfa5b9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommitted.g.verified.cs @@ -0,0 +1,84 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCommitted.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an input audio buffer is committed, either by the client or
+ /// automatically in server VAD mode. The `item_id` property is the ID of the user
+ /// message item that will be created, thus a `conversation.item.created` event
+ /// will also be sent to the client. + ///
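+ // Illustrative sketch, not part of the generated snapshot: it builds the event with
+ // the [SetsRequiredMembers] constructor shown below and serializes it via the
+ // reflection-based ToJson overload from the companion *.Json.g.cs partial; the IDs
+ // are placeholder values.
+ //
+ //     var committed = new RealtimeServerEventInputAudioBufferCommitted(
+ //         eventId: "event_1121",
+ //         previousItemId: "msg_001",
+ //         itemId: "msg_002",
+ //         type: RealtimeServerEventInputAudioBufferCommittedType.InputAudioBufferCommitted);
+ //     var json = committed.ToJson();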
+ public sealed partial class RealtimeServerEventInputAudioBufferCommitted + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.committed`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferCommittedTypeJsonConverter))] + public global::G.RealtimeServerEventInputAudioBufferCommittedType Type { get; set; } + + /// + /// The ID of the preceding item after which the new item will be inserted. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("previous_item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string PreviousItemId { get; set; } + + /// + /// The ID of the user message item that will be created. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `input_audio_buffer.committed`. + /// + /// + /// The ID of the preceding item after which the new item will be inserted. + /// + /// + /// The ID of the user message item that will be created. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventInputAudioBufferCommitted( + string eventId, + string previousItemId, + string itemId, + global::G.RealtimeServerEventInputAudioBufferCommittedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.PreviousItemId = previousItemId ?? throw new global::System.ArgumentNullException(nameof(previousItemId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventInputAudioBufferCommitted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs new file mode 100644 index 0000000000..d8238e6ab8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferCommittedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.committed`. + /// + public enum RealtimeServerEventInputAudioBufferCommittedType + { + /// + /// + /// + InputAudioBufferCommitted, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
+ /// + public static class RealtimeServerEventInputAudioBufferCommittedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventInputAudioBufferCommittedType value) + { + return value switch + { + RealtimeServerEventInputAudioBufferCommittedType.InputAudioBufferCommitted => "input_audio_buffer.committed", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventInputAudioBufferCommittedType? ToEnum(string value) + { + return value switch + { + "input_audio_buffer.committed" => RealtimeServerEventInputAudioBufferCommittedType.InputAudioBufferCommitted, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.Json.g.verified.cs new file mode 100644 index 0000000000..6b78afc71a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventInputAudioBufferSpeechStarted + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventInputAudioBufferSpeechStarted? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventInputAudioBufferSpeechStarted), + jsonSerializerContext) as global::G.RealtimeServerEventInputAudioBufferSpeechStarted; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventInputAudioBufferSpeechStarted? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventInputAudioBufferSpeechStarted), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventInputAudioBufferSpeechStarted; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.verified.cs new file mode 100644 index 0000000000..dcba9de699 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.verified.cs @@ -0,0 +1,95 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStarted.g.cs + +#nullable enable + +namespace G +{ + /// + /// Sent by the server when in `server_vad` mode to indicate that speech has been
+ /// detected in the audio buffer. This can happen any time audio is added to the
+ /// buffer (unless speech is already detected). The client may want to use this
+ /// event to interrupt audio playback or provide visual feedback to the user.
+ /// The client should expect to receive a `input_audio_buffer.speech_stopped` event
+ /// when speech stops. The `item_id` property is the ID of the user message item
+ /// that will be created when speech stops and will also be included in the
+ /// `input_audio_buffer.speech_stopped` event (unless the client manually commits
+ /// the audio buffer during VAD activation). + ///
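+ // Hedged usage sketch, not part of the generated snapshot, showing how a client
+ // might react when server VAD reports speech: it assumes the reflection-based
+ // FromJson overload, `raw` is the incoming JSON payload, and StopLocalPlayback()
+ // is a hypothetical app callback.
+ //
+ //     var started = RealtimeServerEventInputAudioBufferSpeechStarted.FromJson(raw);
+ //     if (started is not null)
+ //     {
+ //         StopLocalPlayback();
+ //         var startMs = started.AudioStartMs; // offset incl. prefix_padding_ms
+ //     }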
+ public sealed partial class RealtimeServerEventInputAudioBufferSpeechStarted + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.speech_started`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedTypeJsonConverter))] + public global::G.RealtimeServerEventInputAudioBufferSpeechStartedType Type { get; set; } + + /// + /// Milliseconds from the start of all audio written to the buffer during the
+ /// session when speech was first detected. This will correspond to the
+ /// beginning of audio sent to the model, and thus includes the
+ /// `prefix_padding_ms` configured in the Session. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio_start_ms")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int AudioStartMs { get; set; } + + /// + /// The ID of the user message item that will be created when speech stops. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `input_audio_buffer.speech_started`. + /// + /// + /// Milliseconds from the start of all audio written to the buffer during the
+ /// session when speech was first detected. This will correspond to the
+ /// beginning of audio sent to the model, and thus includes the
+ /// `prefix_padding_ms` configured in the Session. + /// + /// + /// The ID of the user message item that will be created when speech stops. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventInputAudioBufferSpeechStarted( + string eventId, + int audioStartMs, + string itemId, + global::G.RealtimeServerEventInputAudioBufferSpeechStartedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.AudioStartMs = audioStartMs; + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventInputAudioBufferSpeechStarted() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs new file mode 100644 index 0000000000..9d7f325e52 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStartedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.speech_started`. + /// + public enum RealtimeServerEventInputAudioBufferSpeechStartedType + { + /// + /// + /// + InputAudioBufferSpeechStarted, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventInputAudioBufferSpeechStartedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventInputAudioBufferSpeechStartedType value) + { + return value switch + { + RealtimeServerEventInputAudioBufferSpeechStartedType.InputAudioBufferSpeechStarted => "input_audio_buffer.speech_started", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventInputAudioBufferSpeechStartedType? ToEnum(string value) + { + return value switch + { + "input_audio_buffer.speech_started" => RealtimeServerEventInputAudioBufferSpeechStartedType.InputAudioBufferSpeechStarted, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.Json.g.verified.cs new file mode 100644 index 0000000000..fe24baedec --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventInputAudioBufferSpeechStopped + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventInputAudioBufferSpeechStopped? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventInputAudioBufferSpeechStopped), + jsonSerializerContext) as global::G.RealtimeServerEventInputAudioBufferSpeechStopped; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventInputAudioBufferSpeechStopped? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventInputAudioBufferSpeechStopped), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventInputAudioBufferSpeechStopped; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.verified.cs new file mode 100644 index 0000000000..3d0aa85dd5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.verified.cs @@ -0,0 +1,87 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStopped.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned in `server_vad` mode when the server detects the end of speech in
+ /// the audio buffer. The server will also send an `conversation.item.created`
+ /// event with the user message item that is created from the audio buffer. + ///
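+ // Illustrative only, not part of the generated snapshot: because the `item_id`
+ // matches the earlier `speech_started` event, a client can pair the two to estimate
+ // the detected segment; `startedByItemId` is a hypothetical Dictionary<string, int>
+ // of audio_start_ms values keyed by item_id.
+ //
+ //     var stopped = RealtimeServerEventInputAudioBufferSpeechStopped.FromJson(raw);
+ //     if (stopped is not null &&
+ //         startedByItemId.TryGetValue(stopped.ItemId, out var startMs))
+ //     {
+ //         var approxSpeechMs = stopped.AudioEndMs - startMs;
+ //     }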
+ public sealed partial class RealtimeServerEventInputAudioBufferSpeechStopped + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `input_audio_buffer.speech_stopped`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedTypeJsonConverter))] + public global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType Type { get; set; } + + /// + /// Milliseconds since the session started when speech stopped. This will
+ /// correspond to the end of audio sent to the model, and thus includes the
+ /// `min_silence_duration_ms` configured in the Session. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("audio_end_ms")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int AudioEndMs { get; set; } + + /// + /// The ID of the user message item that will be created. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `input_audio_buffer.speech_stopped`. + /// + /// + /// Milliseconds since the session started when speech stopped. This will
+ /// correspond to the end of audio sent to the model, and thus includes the
+ /// `min_silence_duration_ms` configured in the Session. + /// + /// + /// The ID of the user message item that will be created. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventInputAudioBufferSpeechStopped( + string eventId, + int audioEndMs, + string itemId, + global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.AudioEndMs = audioEndMs; + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventInputAudioBufferSpeechStopped() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs new file mode 100644 index 0000000000..7535f6fcf2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `input_audio_buffer.speech_stopped`. + /// + public enum RealtimeServerEventInputAudioBufferSpeechStoppedType + { + /// + /// + /// + InputAudioBufferSpeechStopped, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventInputAudioBufferSpeechStoppedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventInputAudioBufferSpeechStoppedType value) + { + return value switch + { + RealtimeServerEventInputAudioBufferSpeechStoppedType.InputAudioBufferSpeechStopped => "input_audio_buffer.speech_stopped", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventInputAudioBufferSpeechStoppedType? ToEnum(string value) + { + return value switch + { + "input_audio_buffer.speech_stopped" => RealtimeServerEventInputAudioBufferSpeechStoppedType.InputAudioBufferSpeechStopped, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.Json.g.verified.cs new file mode 100644 index 0000000000..b3447dcff0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventRateLimitsUpdated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventRateLimitsUpdated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventRateLimitsUpdated), + jsonSerializerContext) as global::G.RealtimeServerEventRateLimitsUpdated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventRateLimitsUpdated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventRateLimitsUpdated), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventRateLimitsUpdated; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.g.verified.cs new file mode 100644 index 0000000000..78fb64a0a7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdated.g.verified.cs @@ -0,0 +1,72 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Emitted at the beginning of a Response to indicate the updated rate limits.
+ /// When a Response is created some tokens will be "reserved" for the output
+ /// tokens, the rate limits shown here reflect that reservation, which is then
+ /// adjusted accordingly once the Response is completed. + ///
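+ // A minimal sketch, not part of the generated snapshot, assuming the
+ // reflection-based FromJson overload; `raw` is the incoming JSON payload.
+ //
+ //     var update = RealtimeServerEventRateLimitsUpdated.FromJson(raw);
+ //     if (update is not null)
+ //     {
+ //         foreach (var limit in update.RateLimits)
+ //         {
+ //             // e.g. limit.Name == "tokens", limit.Remaining, limit.ResetSeconds
+ //         }
+ //     }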
+ public sealed partial class RealtimeServerEventRateLimitsUpdated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `rate_limits.updated`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeJsonConverter))] + public global::G.RealtimeServerEventRateLimitsUpdatedType Type { get; set; } + + /// + /// List of rate limit information. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("rate_limits")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList RateLimits { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `rate_limits.updated`. + /// + /// + /// List of rate limit information. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventRateLimitsUpdated( + string eventId, + global::System.Collections.Generic.IList rateLimits, + global::G.RealtimeServerEventRateLimitsUpdatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.RateLimits = rateLimits ?? throw new global::System.ArgumentNullException(nameof(rateLimits)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventRateLimitsUpdated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.Json.g.verified.cs new file mode 100644 index 0000000000..ebec27f002 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventRateLimitsUpdatedRateLimit + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventRateLimitsUpdatedRateLimit? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventRateLimitsUpdatedRateLimit), + jsonSerializerContext) as global::G.RealtimeServerEventRateLimitsUpdatedRateLimit; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventRateLimitsUpdatedRateLimit? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventRateLimitsUpdatedRateLimit), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventRateLimitsUpdatedRateLimit; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.verified.cs new file mode 100644 index 0000000000..d1ff4651f2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.verified.cs @@ -0,0 +1,77 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdatedRateLimit.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class RealtimeServerEventRateLimitsUpdatedRateLimit + { + /// + /// The name of the rate limit (`requests`, `tokens`). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The maximum allowed value for the rate limit. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("limit")] + public int? Limit { get; set; } + + /// + /// The remaining value before the limit is reached. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("remaining")] + public int? Remaining { get; set; } + + /// + /// Seconds until the rate limit resets. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("reset_seconds")] + public double? ResetSeconds { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The name of the rate limit (`requests`, `tokens`). + /// + /// + /// The maximum allowed value for the rate limit. + /// + /// + /// The remaining value before the limit is reached. + /// + /// + /// Seconds until the rate limit resets. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventRateLimitsUpdatedRateLimit( + string? name, + int? limit, + int? remaining, + double? resetSeconds) + { + this.Name = name; + this.Limit = limit; + this.Remaining = remaining; + this.ResetSeconds = resetSeconds; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventRateLimitsUpdatedRateLimit() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs new file mode 100644 index 0000000000..2aa0d14e52 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventRateLimitsUpdatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `rate_limits.updated`. + /// + public enum RealtimeServerEventRateLimitsUpdatedType + { + /// + /// + /// + RateLimitsUpdated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
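The rate-limit models above are plain DTOs. As a minimal consumption sketch (assuming the generated `G` namespace is referenced, that `RealtimeServerEventRateLimitsUpdated` exposes the same `FromJson` helper this diff generates for its sibling models, and that `RateLimits` is an `IList<RealtimeServerEventRateLimitsUpdatedRateLimit>`), a handler might turn a `rate_limits.updated` payload into a backoff delay; `RateLimitsHandler` is an illustrative name, not generated code:

using System;
using G;

internal static class RateLimitsHandler
{
    // Parses a rate_limits.updated payload and derives how long to wait before
    // sending further client events, based on the remaining "requests" budget.
    // Assumes RateLimits is IList<RealtimeServerEventRateLimitsUpdatedRateLimit>.
    public static TimeSpan SuggestedDelay(string json)
    {
        var evt = RealtimeServerEventRateLimitsUpdated.FromJson(json);
        if (evt?.RateLimits == null)
        {
            return TimeSpan.Zero;
        }

        foreach (var limit in evt.RateLimits)
        {
            // "requests" and "tokens" are the names called out in the schema comments above.
            if (limit.Name == "requests" && (limit.Remaining ?? 1) <= 0)
            {
                return TimeSpan.FromSeconds(limit.ResetSeconds ?? 0d);
            }
        }

        return TimeSpan.Zero;
    }
}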
+ /// + public static class RealtimeServerEventRateLimitsUpdatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventRateLimitsUpdatedType value) + { + return value switch + { + RealtimeServerEventRateLimitsUpdatedType.RateLimitsUpdated => "rate_limits.updated", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventRateLimitsUpdatedType? ToEnum(string value) + { + return value switch + { + "rate_limits.updated" => RealtimeServerEventRateLimitsUpdatedType.RateLimitsUpdated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDelta.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDelta.Json.g.verified.cs new file mode 100644 index 0000000000..8e08a1933a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDelta.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDelta.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseAudioDelta + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseAudioDelta? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseAudioDelta), + jsonSerializerContext) as global::G.RealtimeServerEventResponseAudioDelta; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseAudioDelta? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseAudioDelta), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseAudioDelta; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDelta.g.verified.cs new file mode 100644 index 0000000000..ea748f38e9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDelta.g.verified.cs @@ -0,0 +1,117 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDelta.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated audio is updated. + /// + public sealed partial class RealtimeServerEventResponseAudioDelta + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.audio.delta`. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioDeltaTypeJsonConverter))] + public global::G.RealtimeServerEventResponseAudioDeltaType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// Base64-encoded audio data delta. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("delta")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Delta { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.audio.delta`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// Base64-encoded audio data delta. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseAudioDelta( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string delta, + global::G.RealtimeServerEventResponseAudioDeltaType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Delta = delta ?? throw new global::System.ArgumentNullException(nameof(delta)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
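The `Delta` property above is documented as base64-encoded audio, so a client typically decodes it and appends the bytes to a per-item buffer keyed by `ItemId`. A small sketch follows; `AudioDeltaBuffer` and its buffering policy are illustrative, and the raw byte format depends on the session's audio settings rather than anything in this model:

using System;
using System.Collections.Generic;
using System.IO;
using G;

internal sealed class AudioDeltaBuffer
{
    // One growing buffer of decoded audio bytes per item_id.
    private readonly Dictionary<string, MemoryStream> _buffers = new();

    // Decodes a response.audio.delta event and appends its bytes to the
    // buffer for the corresponding item.
    public void Append(RealtimeServerEventResponseAudioDelta evt)
    {
        byte[] audioBytes = Convert.FromBase64String(evt.Delta);

        if (!_buffers.TryGetValue(evt.ItemId, out var stream))
        {
            stream = new MemoryStream();
            _buffers[evt.ItemId] = stream;
        }

        stream.Write(audioBytes, 0, audioBytes.Length);
    }

    // Returns the bytes accumulated so far for an item (empty if none).
    public byte[] Snapshot(string itemId) =>
        _buffers.TryGetValue(itemId, out var stream) ? stream.ToArray() : Array.Empty<byte>();
}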
+ /// + public RealtimeServerEventResponseAudioDelta() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDeltaType.g.verified.cs new file mode 100644 index 0000000000..2d5683f139 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDeltaType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDeltaType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.audio.delta`. + /// + public enum RealtimeServerEventResponseAudioDeltaType + { + /// + /// + /// + ResponseAudioDelta, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseAudioDeltaTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseAudioDeltaType value) + { + return value switch + { + RealtimeServerEventResponseAudioDeltaType.ResponseAudioDelta => "response.audio.delta", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseAudioDeltaType? ToEnum(string value) + { + return value switch + { + "response.audio.delta" => RealtimeServerEventResponseAudioDeltaType.ResponseAudioDelta, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDone.Json.g.verified.cs new file mode 100644 index 0000000000..c83cb1849f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDone.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseAudioDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseAudioDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseAudioDone), + jsonSerializerContext) as global::G.RealtimeServerEventResponseAudioDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseAudioDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseAudioDone), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseAudioDone; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDone.g.verified.cs new file mode 100644 index 0000000000..66e01d98ff --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDone.g.verified.cs @@ -0,0 +1,106 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated audio is done. Also emitted when a Response
+ /// is interrupted, incomplete, or cancelled. + ///
+ public sealed partial class RealtimeServerEventResponseAudioDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.audio.done`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioDoneTypeJsonConverter))] + public global::G.RealtimeServerEventResponseAudioDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.audio.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseAudioDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + global::G.RealtimeServerEventResponseAudioDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
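The `RequiresUnreferencedCode`/`RequiresDynamicCode` annotations on the options-based overloads above steer trimmed and native-AOT callers towards the `JsonSerializerContext` overloads instead. A sketch of that path, using System.Text.Json source generation; `RealtimeEventJsonContext` and `AotSerializationExample` are illustrative names, not part of the generated SDK:

using System.Text.Json.Serialization;
using G;

// A source-generated context lets the generated ToJson/FromJson overloads run
// without reflection-based serialization.
[JsonSerializable(typeof(RealtimeServerEventResponseAudioDone))]
internal partial class RealtimeEventJsonContext : JsonSerializerContext
{
}

internal static class AotSerializationExample
{
    public static string RoundTrip(RealtimeServerEventResponseAudioDone evt)
    {
        // Serialize and deserialize through the context rather than the
        // JsonSerializerOptions overloads.
        string json = evt.ToJson(RealtimeEventJsonContext.Default);
        var parsed = RealtimeServerEventResponseAudioDone.FromJson(json, RealtimeEventJsonContext.Default);
        return parsed?.EventId ?? string.Empty;
    }
}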
+ /// + public RealtimeServerEventResponseAudioDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDoneType.g.verified.cs new file mode 100644 index 0000000000..014de596d9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioDoneType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.audio.done`. + /// + public enum RealtimeServerEventResponseAudioDoneType + { + /// + /// + /// + ResponseAudioDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseAudioDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseAudioDoneType value) + { + return value switch + { + RealtimeServerEventResponseAudioDoneType.ResponseAudioDone => "response.audio.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseAudioDoneType? ToEnum(string value) + { + return value switch + { + "response.audio.done" => RealtimeServerEventResponseAudioDoneType.ResponseAudioDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.Json.g.verified.cs new file mode 100644 index 0000000000..1d61a7d2cd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDelta.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseAudioTranscriptDelta + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseAudioTranscriptDelta? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseAudioTranscriptDelta), + jsonSerializerContext) as global::G.RealtimeServerEventResponseAudioTranscriptDelta; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseAudioTranscriptDelta? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseAudioTranscriptDelta), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseAudioTranscriptDelta; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.verified.cs new file mode 100644 index 0000000000..761eb3939f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.verified.cs @@ -0,0 +1,117 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDelta.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated transcription of audio output is updated. + /// + public sealed partial class RealtimeServerEventResponseAudioTranscriptDelta + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.audio_transcript.delta`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaTypeJsonConverter))] + public global::G.RealtimeServerEventResponseAudioTranscriptDeltaType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The transcript delta. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("delta")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Delta { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.audio_transcript.delta`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The transcript delta. 
+ /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseAudioTranscriptDelta( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string delta, + global::G.RealtimeServerEventResponseAudioTranscriptDeltaType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Delta = delta ?? throw new global::System.ArgumentNullException(nameof(delta)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseAudioTranscriptDelta() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs new file mode 100644 index 0000000000..89689b4999 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDeltaType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.audio_transcript.delta`. + /// + public enum RealtimeServerEventResponseAudioTranscriptDeltaType + { + /// + /// + /// + ResponseAudioTranscriptDelta, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseAudioTranscriptDeltaTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseAudioTranscriptDeltaType value) + { + return value switch + { + RealtimeServerEventResponseAudioTranscriptDeltaType.ResponseAudioTranscriptDelta => "response.audio_transcript.delta", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseAudioTranscriptDeltaType? ToEnum(string value) + { + return value switch + { + "response.audio_transcript.delta" => RealtimeServerEventResponseAudioTranscriptDeltaType.ResponseAudioTranscriptDelta, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.Json.g.verified.cs new file mode 100644 index 0000000000..913cee9466 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseAudioTranscriptDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseAudioTranscriptDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseAudioTranscriptDone), + jsonSerializerContext) as global::G.RealtimeServerEventResponseAudioTranscriptDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseAudioTranscriptDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseAudioTranscriptDone), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseAudioTranscriptDone; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.g.verified.cs new file mode 100644 index 0000000000..f861c6867e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDone.g.verified.cs @@ -0,0 +1,119 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated transcription of audio output is done
+ /// streaming. Also emitted when a Response is interrupted, incomplete, or
+ /// cancelled. + ///
+ public sealed partial class RealtimeServerEventResponseAudioTranscriptDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.audio_transcript.done`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneTypeJsonConverter))] + public global::G.RealtimeServerEventResponseAudioTranscriptDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The final transcript of the audio. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.audio_transcript.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The final transcript of the audio. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseAudioTranscriptDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string transcript, + global::G.RealtimeServerEventResponseAudioTranscriptDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Transcript = transcript ?? throw new global::System.ArgumentNullException(nameof(transcript)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
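Putting the two transcript events together: a client can build running text from `response.audio_transcript.delta` events and then replace it with the authoritative `transcript` carried by `response.audio_transcript.done`. A sketch, where `TranscriptAccumulator` is an illustrative name rather than generated code:

using System.Collections.Generic;
using System.Text;
using G;

internal sealed class TranscriptAccumulator
{
    // Partial transcript text per item_id.
    private readonly Dictionary<string, StringBuilder> _partial = new();

    // Appends a transcript delta to the running text for its item.
    public void OnDelta(RealtimeServerEventResponseAudioTranscriptDelta evt)
    {
        if (!_partial.TryGetValue(evt.ItemId, out var sb))
        {
            sb = new StringBuilder();
            _partial[evt.ItemId] = sb;
        }
        sb.Append(evt.Delta);
    }

    // The done event carries the final transcript, so prefer it over the
    // locally accumulated text and drop the partial buffer.
    public string OnDone(RealtimeServerEventResponseAudioTranscriptDone evt)
    {
        _partial.Remove(evt.ItemId);
        return evt.Transcript;
    }
}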
+ /// + public RealtimeServerEventResponseAudioTranscriptDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs new file mode 100644 index 0000000000..37d2e95a24 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseAudioTranscriptDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.audio_transcript.done`. + /// + public enum RealtimeServerEventResponseAudioTranscriptDoneType + { + /// + /// + /// + ResponseAudioTranscriptDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseAudioTranscriptDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseAudioTranscriptDoneType value) + { + return value switch + { + RealtimeServerEventResponseAudioTranscriptDoneType.ResponseAudioTranscriptDone => "response.audio_transcript.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseAudioTranscriptDoneType? ToEnum(string value) + { + return value switch + { + "response.audio_transcript.done" => RealtimeServerEventResponseAudioTranscriptDoneType.ResponseAudioTranscriptDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.Json.g.verified.cs new file mode 100644 index 0000000000..2cc162d361 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAdded.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseContentPartAdded + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseContentPartAdded? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseContentPartAdded), + jsonSerializerContext) as global::G.RealtimeServerEventResponseContentPartAdded; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseContentPartAdded? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseContentPartAdded), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseContentPartAdded; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.g.verified.cs new file mode 100644 index 0000000000..39bba24e99 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAdded.g.verified.cs @@ -0,0 +1,118 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAdded.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a new content part is added to an assistant message item during
+ /// response generation. + ///
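All of these server events share the same envelope (an `event_id` plus a string `type` discriminator), so a client usually peeks at `type` before choosing which generated model to deserialize into. A sketch of that dispatch for the event types added in this diff, assuming each model has the `FromJson` helper generated for its siblings; `ServerEventRouter` is an illustrative name:

using System.Text.Json;
using G;

internal static class ServerEventRouter
{
    // Reads the "type" discriminator from the raw payload and deserializes
    // into the matching generated model. Unhandled types return null.
    public static object? Parse(string json)
    {
        using var doc = JsonDocument.Parse(json);
        if (!doc.RootElement.TryGetProperty("type", out var typeProp))
        {
            return null;
        }

        return typeProp.GetString() switch
        {
            "rate_limits.updated" => (object?)RealtimeServerEventRateLimitsUpdated.FromJson(json),
            "response.audio.delta" => RealtimeServerEventResponseAudioDelta.FromJson(json),
            "response.audio.done" => RealtimeServerEventResponseAudioDone.FromJson(json),
            "response.audio_transcript.delta" => RealtimeServerEventResponseAudioTranscriptDelta.FromJson(json),
            "response.audio_transcript.done" => RealtimeServerEventResponseAudioTranscriptDone.FromJson(json),
            _ => null, // e.g. response.content_part.added, handled by the model below
        };
    }
}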
+ public sealed partial class RealtimeServerEventResponseContentPartAdded + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.content_part.added`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartAddedTypeJsonConverter))] + public global::G.RealtimeServerEventResponseContentPartAddedType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item to which the content part was added. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The content part that was added. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("part")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeServerEventResponseContentPartAddedPart Part { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.content_part.added`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item to which the content part was added. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The content part that was added. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseContentPartAdded( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + global::G.RealtimeServerEventResponseContentPartAddedPart part, + global::G.RealtimeServerEventResponseContentPartAddedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Part = part ?? 
throw new global::System.ArgumentNullException(nameof(part)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseContentPartAdded() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.Json.g.verified.cs new file mode 100644 index 0000000000..7a25d57639 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAddedPart.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseContentPartAddedPart + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseContentPartAddedPart? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseContentPartAddedPart), + jsonSerializerContext) as global::G.RealtimeServerEventResponseContentPartAddedPart; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseContentPartAddedPart? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseContentPartAddedPart), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseContentPartAddedPart; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.g.verified.cs new file mode 100644 index 0000000000..10a002dbcd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPart.g.verified.cs @@ -0,0 +1,78 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAddedPart.g.cs + +#nullable enable + +namespace G +{ + /// + /// The content part that was added. + /// + public sealed partial class RealtimeServerEventResponseContentPartAddedPart + { + /// + /// The content type ("text", "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartAddedPartTypeJsonConverter))] + public global::G.RealtimeServerEventResponseContentPartAddedPartType? Type { get; set; } + + /// + /// The text content (if type is "text"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data (if type is "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio (if type is "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? 
Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The content type ("text", "audio"). + /// + /// + /// The text content (if type is "text"). + /// + /// + /// Base64-encoded audio data (if type is "audio"). + /// + /// + /// The transcript of the audio (if type is "audio"). + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseContentPartAddedPart( + global::G.RealtimeServerEventResponseContentPartAddedPartType? type, + string? text, + string? audio, + string? transcript) + { + this.Type = type; + this.Text = text; + this.Audio = audio; + this.Transcript = transcript; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseContentPartAddedPart() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs new file mode 100644 index 0000000000..50861e4828 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAddedPartType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The content type ("text", "audio"). + /// + public enum RealtimeServerEventResponseContentPartAddedPartType + { + /// + /// + /// + Audio, + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseContentPartAddedPartTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseContentPartAddedPartType value) + { + return value switch + { + RealtimeServerEventResponseContentPartAddedPartType.Audio => "audio", + RealtimeServerEventResponseContentPartAddedPartType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseContentPartAddedPartType? 
ToEnum(string value) + { + return value switch + { + "audio" => RealtimeServerEventResponseContentPartAddedPartType.Audio, + "text" => RealtimeServerEventResponseContentPartAddedPartType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedType.g.verified.cs new file mode 100644 index 0000000000..14ea070c68 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartAddedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartAddedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.content_part.added`. + /// + public enum RealtimeServerEventResponseContentPartAddedType + { + /// + /// + /// + ResponseContentPartAdded, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseContentPartAddedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseContentPartAddedType value) + { + return value switch + { + RealtimeServerEventResponseContentPartAddedType.ResponseContentPartAdded => "response.content_part.added", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseContentPartAddedType? ToEnum(string value) + { + return value switch + { + "response.content_part.added" => RealtimeServerEventResponseContentPartAddedType.ResponseContentPartAdded, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDone.Json.g.verified.cs new file mode 100644 index 0000000000..bceacc59f4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDone.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseContentPartDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseContentPartDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseContentPartDone), + jsonSerializerContext) as global::G.RealtimeServerEventResponseContentPartDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseContentPartDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseContentPartDone), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseContentPartDone; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDone.g.verified.cs new file mode 100644 index 0000000000..4a501595d5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDone.g.verified.cs @@ -0,0 +1,118 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a content part is done streaming in an assistant message item.
+ /// Also emitted when a Response is interrupted, incomplete, or cancelled. + ///
+ public sealed partial class RealtimeServerEventResponseContentPartDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.content_part.done`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartDoneTypeJsonConverter))] + public global::G.RealtimeServerEventResponseContentPartDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The content part that is done. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("part")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeServerEventResponseContentPartDonePart Part { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.content_part.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The content part that is done. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseContentPartDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + global::G.RealtimeServerEventResponseContentPartDonePart part, + global::G.RealtimeServerEventResponseContentPartDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Part = part ?? throw new global::System.ArgumentNullException(nameof(part)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseContentPartDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.Json.g.verified.cs new file mode 100644 index 0000000000..2fc01e77a1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDonePart.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseContentPartDonePart + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseContentPartDonePart? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseContentPartDonePart), + jsonSerializerContext) as global::G.RealtimeServerEventResponseContentPartDonePart; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseContentPartDonePart? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseContentPartDonePart), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseContentPartDonePart; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.g.verified.cs new file mode 100644 index 0000000000..fd7f3f61a6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDonePart.g.verified.cs @@ -0,0 +1,77 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDonePart.g.cs + +#nullable enable + +namespace G +{ + /// + /// The content part that is done. + /// + public sealed partial class RealtimeServerEventResponseContentPartDonePart + { + /// + /// The content type ("text", "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// The text content (if type is "text"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Base64-encoded audio data (if type is "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("audio")] + public string? Audio { get; set; } + + /// + /// The transcript of the audio (if type is "audio"). + /// + [global::System.Text.Json.Serialization.JsonPropertyName("transcript")] + public string? Transcript { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The content type ("text", "audio"). 
+ /// + /// + /// The text content (if type is "text"). + /// + /// + /// Base64-encoded audio data (if type is "audio"). + /// + /// + /// The transcript of the audio (if type is "audio"). + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseContentPartDonePart( + string? type, + string? text, + string? audio, + string? transcript) + { + this.Type = type; + this.Text = text; + this.Audio = audio; + this.Transcript = transcript; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseContentPartDonePart() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDoneType.g.verified.cs new file mode 100644 index 0000000000..a5010f17ad --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseContentPartDoneType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseContentPartDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.content_part.done`. + /// + public enum RealtimeServerEventResponseContentPartDoneType + { + /// + /// + /// + ResponseContentPartDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseContentPartDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseContentPartDoneType value) + { + return value switch + { + RealtimeServerEventResponseContentPartDoneType.ResponseContentPartDone => "response.content_part.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseContentPartDoneType? ToEnum(string value) + { + return value switch + { + "response.content_part.done" => RealtimeServerEventResponseContentPartDoneType.ResponseContentPartDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreated.Json.g.verified.cs new file mode 100644 index 0000000000..fe1a1a6554 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreated.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseCreated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseCreated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseCreated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseCreated), + jsonSerializerContext) as global::G.RealtimeServerEventResponseCreated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseCreated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseCreated), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseCreated; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreated.g.verified.cs new file mode 100644 index 0000000000..1430710f97 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreated.g.verified.cs @@ -0,0 +1,70 @@ +//HintName: G.Models.RealtimeServerEventResponseCreated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a new Response is created. The first event of response creation,
+ /// where the response is in an initial state of `in_progress`. + ///
+ public sealed partial class RealtimeServerEventResponseCreated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.created`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseCreatedTypeJsonConverter))] + public global::G.RealtimeServerEventResponseCreatedType Type { get; set; } + + /// + /// The response resource. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeResponse Response { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.created`. + /// + /// + /// The response resource. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseCreated( + string eventId, + global::G.RealtimeResponse response, + global::G.RealtimeServerEventResponseCreatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Response = response ?? throw new global::System.ArgumentNullException(nameof(response)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseCreated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreatedType.g.verified.cs new file mode 100644 index 0000000000..018a58b6dd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseCreatedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseCreatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.created`. + /// + public enum RealtimeServerEventResponseCreatedType + { + /// + /// + /// + ResponseCreated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseCreatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseCreatedType value) + { + return value switch + { + RealtimeServerEventResponseCreatedType.ResponseCreated => "response.created", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseCreatedType? 
ToEnum(string value) + { + return value switch + { + "response.created" => RealtimeServerEventResponseCreatedType.ResponseCreated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDone.Json.g.verified.cs new file mode 100644 index 0000000000..1db9978d02 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDone.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseDone), + jsonSerializerContext) as global::G.RealtimeServerEventResponseDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseDone), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseDone; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDone.g.verified.cs new file mode 100644 index 0000000000..e334a454e2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDone.g.verified.cs @@ -0,0 +1,71 @@ +//HintName: G.Models.RealtimeServerEventResponseDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a Response is done streaming. Always emitted, no matter the
+ /// final state. The Response object included in the `response.done` event will
+ /// include all output Items in the Response but will omit the raw audio data. + ///
+ public sealed partial class RealtimeServerEventResponseDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.done`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseDoneTypeJsonConverter))] + public global::G.RealtimeServerEventResponseDoneType Type { get; set; } + + /// + /// The response resource. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeResponse Response { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.done`. + /// + /// + /// The response resource. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseDone( + string eventId, + global::G.RealtimeResponse response, + global::G.RealtimeServerEventResponseDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Response = response ?? throw new global::System.ArgumentNullException(nameof(response)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDoneType.g.verified.cs new file mode 100644 index 0000000000..bb95f6b240 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseDoneType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.done`. + /// + public enum RealtimeServerEventResponseDoneType + { + /// + /// + /// + ResponseDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseDoneType value) + { + return value switch + { + RealtimeServerEventResponseDoneType.ResponseDone => "response.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseDoneType? 
ToEnum(string value) + { + return value switch + { + "response.done" => RealtimeServerEventResponseDoneType.ResponseDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.Json.g.verified.cs new file mode 100644 index 0000000000..bfe65bbb65 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDelta + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseFunctionCallArgumentsDelta? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseFunctionCallArgumentsDelta), + jsonSerializerContext) as global::G.RealtimeServerEventResponseFunctionCallArgumentsDelta; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseFunctionCallArgumentsDelta? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseFunctionCallArgumentsDelta), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseFunctionCallArgumentsDelta; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.verified.cs new file mode 100644 index 0000000000..93d7a13083 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.verified.cs @@ -0,0 +1,117 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDelta.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated function call arguments are updated. + /// + public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDelta + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.function_call_arguments.delta`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeJsonConverter))] + public global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the function call item. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The ID of the function call. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string CallId { get; set; } + + /// + /// The arguments delta as a JSON string. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("delta")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Delta { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.function_call_arguments.delta`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the function call item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The ID of the function call. + /// + /// + /// The arguments delta as a JSON string. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseFunctionCallArgumentsDelta( + string eventId, + string responseId, + string itemId, + int outputIndex, + string callId, + string delta, + global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.CallId = callId ?? throw new global::System.ArgumentNullException(nameof(callId)); + this.Delta = delta ?? throw new global::System.ArgumentNullException(nameof(delta)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseFunctionCallArgumentsDelta() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs new file mode 100644 index 0000000000..f765cab7c6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.function_call_arguments.delta`. 
+ /// + public enum RealtimeServerEventResponseFunctionCallArgumentsDeltaType + { + /// + /// + /// + ResponseFunctionCallArgumentsDelta, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseFunctionCallArgumentsDeltaType value) + { + return value switch + { + RealtimeServerEventResponseFunctionCallArgumentsDeltaType.ResponseFunctionCallArgumentsDelta => "response.function_call_arguments.delta", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseFunctionCallArgumentsDeltaType? ToEnum(string value) + { + return value switch + { + "response.function_call_arguments.delta" => RealtimeServerEventResponseFunctionCallArgumentsDeltaType.ResponseFunctionCallArgumentsDelta, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.Json.g.verified.cs new file mode 100644 index 0000000000..df56342466 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseFunctionCallArgumentsDone? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseFunctionCallArgumentsDone), + jsonSerializerContext) as global::G.RealtimeServerEventResponseFunctionCallArgumentsDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseFunctionCallArgumentsDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseFunctionCallArgumentsDone), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseFunctionCallArgumentsDone; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.verified.cs new file mode 100644 index 0000000000..19afdd0e76 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.verified.cs @@ -0,0 +1,118 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the model-generated function call arguments are done streaming.
+ /// Also emitted when a Response is interrupted, incomplete, or cancelled. + ///
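// Illustrative consumer-side sketch, not part of the generated snapshot: given the
// generated FromJson overloads and required properties below, a handler for the
// `response.function_call_arguments.done` event might look like this. The `json`
// string and the DispatchFunctionCall helper are assumptions for illustration.
//
//   var done = G.RealtimeServerEventResponseFunctionCallArgumentsDone.FromJson(json);
//   if (done is not null)
//   {
//       // CallId links the final Arguments JSON back to the originating function call.
//       DispatchFunctionCall(done.CallId, done.Arguments);
//   }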
+ public sealed partial class RealtimeServerEventResponseFunctionCallArgumentsDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.function_call_arguments.done`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeJsonConverter))] + public global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the function call item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The ID of the function call. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("call_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string CallId { get; set; } + + /// + /// The final arguments as a JSON string. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("arguments")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Arguments { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.function_call_arguments.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the function call item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The ID of the function call. + /// + /// + /// The final arguments as a JSON string. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseFunctionCallArgumentsDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + string callId, + string arguments, + global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.CallId = callId ?? throw new global::System.ArgumentNullException(nameof(callId)); + this.Arguments = arguments ?? throw new global::System.ArgumentNullException(nameof(arguments)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseFunctionCallArgumentsDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs new file mode 100644 index 0000000000..7db28cf70d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.function_call_arguments.done`. + /// + public enum RealtimeServerEventResponseFunctionCallArgumentsDoneType + { + /// + /// + /// + ResponseFunctionCallArgumentsDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseFunctionCallArgumentsDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseFunctionCallArgumentsDoneType value) + { + return value switch + { + RealtimeServerEventResponseFunctionCallArgumentsDoneType.ResponseFunctionCallArgumentsDone => "response.function_call_arguments.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseFunctionCallArgumentsDoneType? ToEnum(string value) + { + return value switch + { + "response.function_call_arguments.done" => RealtimeServerEventResponseFunctionCallArgumentsDoneType.ResponseFunctionCallArgumentsDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.Json.g.verified.cs new file mode 100644 index 0000000000..0c67408752 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemAdded.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseOutputItemAdded + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseOutputItemAdded? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseOutputItemAdded), + jsonSerializerContext) as global::G.RealtimeServerEventResponseOutputItemAdded; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseOutputItemAdded? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseOutputItemAdded), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseOutputItemAdded; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.g.verified.cs new file mode 100644 index 0000000000..1e2869dda6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAdded.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemAdded.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a new Item is created during Response generation. + /// + public sealed partial class RealtimeServerEventResponseOutputItemAdded + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.output_item.added`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseOutputItemAddedTypeJsonConverter))] + public global::G.RealtimeServerEventResponseOutputItemAddedType Type { get; set; } + + /// + /// The ID of the Response to which the item belongs. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The index of the output item in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The item to add to the conversation. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeConversationItem Item { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.output_item.added`. + /// + /// + /// The ID of the Response to which the item belongs. + /// + /// + /// The index of the output item in the Response. + /// + /// + /// The item to add to the conversation. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseOutputItemAdded( + string eventId, + string responseId, + int outputIndex, + global::G.RealtimeConversationItem item, + global::G.RealtimeServerEventResponseOutputItemAddedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.OutputIndex = outputIndex; + this.Item = item ?? 
throw new global::System.ArgumentNullException(nameof(item)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseOutputItemAdded() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs new file mode 100644 index 0000000000..edb4291fa7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemAddedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.output_item.added`. + /// + public enum RealtimeServerEventResponseOutputItemAddedType + { + /// + /// + /// + ResponseOutputItemAdded, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseOutputItemAddedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseOutputItemAddedType value) + { + return value switch + { + RealtimeServerEventResponseOutputItemAddedType.ResponseOutputItemAdded => "response.output_item.added", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseOutputItemAddedType? ToEnum(string value) + { + return value switch + { + "response.output_item.added" => RealtimeServerEventResponseOutputItemAddedType.ResponseOutputItemAdded, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.Json.g.verified.cs new file mode 100644 index 0000000000..6969c72e9a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseOutputItemDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseOutputItemDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseOutputItemDone), + jsonSerializerContext) as global::G.RealtimeServerEventResponseOutputItemDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseOutputItemDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseOutputItemDone), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseOutputItemDone; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.g.verified.cs new file mode 100644 index 0000000000..545bd9200b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDone.g.verified.cs @@ -0,0 +1,94 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when an Item is done streaming. Also emitted when a Response is
+ /// interrupted, incomplete, or cancelled. + ///
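// Illustrative sketch, not part of the generated snapshot: constructing and serializing
// this event with the generated [SetsRequiredMembers] constructor and ToJson helper.
// The literal IDs and the `conversationItem` value (a populated RealtimeConversationItem)
// are placeholders.
//
//   var evt = new G.RealtimeServerEventResponseOutputItemDone(
//       eventId: "event_123",
//       responseId: "resp_456",
//       outputIndex: 0,
//       item: conversationItem,
//       type: G.RealtimeServerEventResponseOutputItemDoneType.ResponseOutputItemDone);
//   string json = evt.ToJson();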
+ public sealed partial class RealtimeServerEventResponseOutputItemDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.output_item.done`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseOutputItemDoneTypeJsonConverter))] + public global::G.RealtimeServerEventResponseOutputItemDoneType Type { get; set; } + + /// + /// The ID of the Response to which the item belongs. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The index of the output item in the Response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The item to add to the conversation. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeConversationItem Item { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.output_item.done`. + /// + /// + /// The ID of the Response to which the item belongs. + /// + /// + /// The index of the output item in the Response. + /// + /// + /// The item to add to the conversation. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseOutputItemDone( + string eventId, + string responseId, + int outputIndex, + global::G.RealtimeConversationItem item, + global::G.RealtimeServerEventResponseOutputItemDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.OutputIndex = outputIndex; + this.Item = item ?? throw new global::System.ArgumentNullException(nameof(item)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseOutputItemDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs new file mode 100644 index 0000000000..a196c0f904 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseOutputItemDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.output_item.done`. + /// + public enum RealtimeServerEventResponseOutputItemDoneType + { + /// + /// + /// + ResponseOutputItemDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseOutputItemDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseOutputItemDoneType value) + { + return value switch + { + RealtimeServerEventResponseOutputItemDoneType.ResponseOutputItemDone => "response.output_item.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseOutputItemDoneType? ToEnum(string value) + { + return value switch + { + "response.output_item.done" => RealtimeServerEventResponseOutputItemDoneType.ResponseOutputItemDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDelta.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDelta.Json.g.verified.cs new file mode 100644 index 0000000000..3f38e1448b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDelta.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDelta.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseTextDelta + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseTextDelta? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseTextDelta), + jsonSerializerContext) as global::G.RealtimeServerEventResponseTextDelta; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseTextDelta? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseTextDelta), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseTextDelta; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDelta.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDelta.g.verified.cs new file mode 100644 index 0000000000..0fdab6e60d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDelta.g.verified.cs @@ -0,0 +1,117 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDelta.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the text value of a "text" content part is updated. + /// + public sealed partial class RealtimeServerEventResponseTextDelta + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.text.delta`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseTextDeltaTypeJsonConverter))] + public global::G.RealtimeServerEventResponseTextDeltaType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The text delta. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("delta")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Delta { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.text.delta`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The text delta. 
+ /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseTextDelta( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string delta, + global::G.RealtimeServerEventResponseTextDeltaType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Delta = delta ?? throw new global::System.ArgumentNullException(nameof(delta)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventResponseTextDelta() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDeltaType.g.verified.cs new file mode 100644 index 0000000000..efcfa5aaaa --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDeltaType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDeltaType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.text.delta`. + /// + public enum RealtimeServerEventResponseTextDeltaType + { + /// + /// + /// + ResponseTextDelta, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseTextDeltaTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseTextDeltaType value) + { + return value switch + { + RealtimeServerEventResponseTextDeltaType.ResponseTextDelta => "response.text.delta", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseTextDeltaType? ToEnum(string value) + { + return value switch + { + "response.text.delta" => RealtimeServerEventResponseTextDeltaType.ResponseTextDelta, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDone.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDone.Json.g.verified.cs new file mode 100644 index 0000000000..a76e868983 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDone.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDone.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventResponseTextDone + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. 
+ /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventResponseTextDone? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventResponseTextDone), + jsonSerializerContext) as global::G.RealtimeServerEventResponseTextDone; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventResponseTextDone? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventResponseTextDone), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventResponseTextDone; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDone.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDone.g.verified.cs new file mode 100644 index 0000000000..1c9fb38fbd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDone.g.verified.cs @@ -0,0 +1,118 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDone.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when the text value of a "text" content part is done streaming. Also
+ /// emitted when a Response is interrupted, incomplete, or cancelled. + ///
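// Illustrative sketch, not part of the generated snapshot: because the `.done` event
// carries the final text, a consumer can overwrite whatever it accumulated from
// `response.text.delta` events. `json` and `transcript` (assumed to be a
// Dictionary<(string ItemId, int ContentIndex), string>) are placeholders.
//
//   var textDone = G.RealtimeServerEventResponseTextDone.FromJson(json);
//   if (textDone is not null)
//   {
//       transcript[(textDone.ItemId, textDone.ContentIndex)] = textDone.Text;
//   }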
+ public sealed partial class RealtimeServerEventResponseTextDone + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `response.text.done`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventResponseTextDoneTypeJsonConverter))] + public global::G.RealtimeServerEventResponseTextDoneType Type { get; set; } + + /// + /// The ID of the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("response_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ResponseId { get; set; } + + /// + /// The ID of the item. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("item_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string ItemId { get; set; } + + /// + /// The index of the output item in the response. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputIndex { get; set; } + + /// + /// The index of the content part in the item's content array. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content_index")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int ContentIndex { get; set; } + + /// + /// The final text content. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string Text { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `response.text.done`. + /// + /// + /// The ID of the response. + /// + /// + /// The ID of the item. + /// + /// + /// The index of the output item in the response. + /// + /// + /// The index of the content part in the item's content array. + /// + /// + /// The final text content. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventResponseTextDone( + string eventId, + string responseId, + string itemId, + int outputIndex, + int contentIndex, + string text, + global::G.RealtimeServerEventResponseTextDoneType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.ResponseId = responseId ?? throw new global::System.ArgumentNullException(nameof(responseId)); + this.ItemId = itemId ?? throw new global::System.ArgumentNullException(nameof(itemId)); + this.OutputIndex = outputIndex; + this.ContentIndex = contentIndex; + this.Text = text ?? throw new global::System.ArgumentNullException(nameof(text)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public RealtimeServerEventResponseTextDone() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDoneType.g.verified.cs new file mode 100644 index 0000000000..0085da81ee --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventResponseTextDoneType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventResponseTextDoneType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `response.text.done`. + /// + public enum RealtimeServerEventResponseTextDoneType + { + /// + /// + /// + ResponseTextDone, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventResponseTextDoneTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventResponseTextDoneType value) + { + return value switch + { + RealtimeServerEventResponseTextDoneType.ResponseTextDone => "response.text.done", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventResponseTextDoneType? ToEnum(string value) + { + return value switch + { + "response.text.done" => RealtimeServerEventResponseTextDoneType.ResponseTextDone, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreated.Json.g.verified.cs new file mode 100644 index 0000000000..4d04ec6220 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreated.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventSessionCreated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventSessionCreated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventSessionCreated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventSessionCreated), + jsonSerializerContext) as global::G.RealtimeServerEventSessionCreated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventSessionCreated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventSessionCreated), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventSessionCreated; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreated.g.verified.cs new file mode 100644 index 0000000000..3cb2d077a7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreated.g.verified.cs @@ -0,0 +1,71 @@ +//HintName: G.Models.RealtimeServerEventSessionCreated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a Session is created. Emitted automatically when a new
+ /// connection is established as the first server event. This event will contain
+ /// the default Session configuration. + ///
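// Illustrative sketch, not part of the generated snapshot: `session.created` is the
// first event on a new connection, so a client might capture the default configuration
// from it. `json` is a placeholder; ToValueString comes from the generated enum
// extensions further below.
//
//   var created = G.RealtimeServerEventSessionCreated.FromJson(json);
//   if (created?.Type == G.RealtimeServerEventSessionCreatedType.SessionCreated)
//   {
//       G.RealtimeSession defaults = created.Session;
//       System.Console.WriteLine($"received {created.Type.ToValueString()}");
//   }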
+ public sealed partial class RealtimeServerEventSessionCreated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `session.created`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventSessionCreatedTypeJsonConverter))] + public global::G.RealtimeServerEventSessionCreatedType Type { get; set; } + + /// + /// Realtime session object configuration. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("session")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeSession Session { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `session.created`. + /// + /// + /// Realtime session object configuration. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventSessionCreated( + string eventId, + global::G.RealtimeSession session, + global::G.RealtimeServerEventSessionCreatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Session = session ?? throw new global::System.ArgumentNullException(nameof(session)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventSessionCreated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreatedType.g.verified.cs new file mode 100644 index 0000000000..770cac6f3f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionCreatedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventSessionCreatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `session.created`. + /// + public enum RealtimeServerEventSessionCreatedType + { + /// + /// + /// + SessionCreated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventSessionCreatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventSessionCreatedType value) + { + return value switch + { + RealtimeServerEventSessionCreatedType.SessionCreated => "session.created", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventSessionCreatedType? 
ToEnum(string value) + { + return value switch + { + "session.created" => RealtimeServerEventSessionCreatedType.SessionCreated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdated.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdated.Json.g.verified.cs new file mode 100644 index 0000000000..a82261452b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdated.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeServerEventSessionUpdated.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeServerEventSessionUpdated + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeServerEventSessionUpdated? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeServerEventSessionUpdated), + jsonSerializerContext) as global::G.RealtimeServerEventSessionUpdated; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeServerEventSessionUpdated? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeServerEventSessionUpdated), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeServerEventSessionUpdated; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdated.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdated.g.verified.cs new file mode 100644 index 0000000000..960a8e8006 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdated.g.verified.cs @@ -0,0 +1,70 @@ +//HintName: G.Models.RealtimeServerEventSessionUpdated.g.cs + +#nullable enable + +namespace G +{ + /// + /// Returned when a session is updated with a `session.update` event, unless
+ /// there is an error. + ///
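// Illustrative sketch, not generator output: building a `session.updated` event with the
// generated constructor and serializing it through the reflection-based ToJson overload.
// The default-constructed RealtimeSession is used here purely for brevity.
internal static class SessionUpdatedUsageSketch
{
    public static string Emit(string eventId)
    {
        var updated = new global::G.RealtimeServerEventSessionUpdated(
            eventId,
            new global::G.RealtimeSession(),
            global::G.RealtimeServerEventSessionUpdatedType.SessionUpdated);

        // Serialize with default JsonSerializerOptions; the enum converter is applied via the attribute.
        return updated.ToJson();
    }
}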
+ public sealed partial class RealtimeServerEventSessionUpdated + { + /// + /// The unique ID of the server event. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("event_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string EventId { get; set; } + + /// + /// The event type, must be `session.updated`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeServerEventSessionUpdatedTypeJsonConverter))] + public global::G.RealtimeServerEventSessionUpdatedType Type { get; set; } + + /// + /// Realtime session object configuration. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("session")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::G.RealtimeSession Session { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The unique ID of the server event. + /// + /// + /// The event type, must be `session.updated`. + /// + /// + /// Realtime session object configuration. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeServerEventSessionUpdated( + string eventId, + global::G.RealtimeSession session, + global::G.RealtimeServerEventSessionUpdatedType type) + { + this.EventId = eventId ?? throw new global::System.ArgumentNullException(nameof(eventId)); + this.Session = session ?? throw new global::System.ArgumentNullException(nameof(session)); + this.Type = type; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeServerEventSessionUpdated() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdatedType.g.verified.cs new file mode 100644 index 0000000000..265a95f458 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeServerEventSessionUpdatedType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeServerEventSessionUpdatedType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The event type, must be `session.updated`. + /// + public enum RealtimeServerEventSessionUpdatedType + { + /// + /// + /// + SessionUpdated, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeServerEventSessionUpdatedTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeServerEventSessionUpdatedType value) + { + return value switch + { + RealtimeServerEventSessionUpdatedType.SessionUpdated => "session.updated", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeServerEventSessionUpdatedType? 
ToEnum(string value) + { + return value switch + { + "session.updated" => RealtimeServerEventSessionUpdatedType.SessionUpdated, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSession.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSession.Json.g.verified.cs new file mode 100644 index 0000000000..fc3b7f3cb4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSession.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeSession.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSession + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeSession? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeSession), + jsonSerializerContext) as global::G.RealtimeSession; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSession? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeSession), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeSession; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSession.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSession.g.verified.cs new file mode 100644 index 0000000000..2ab0ee41f7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSession.g.verified.cs @@ -0,0 +1,210 @@ +//HintName: G.Models.RealtimeSession.g.cs + +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace G +{ + /// + /// Realtime session object configuration. + /// + public sealed partial class RealtimeSession + { + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + ///
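// Illustrative sketch, not generator output: restricting the session to text. The element
// type of Modalities is assumed to be the RealtimeSessionModalitie enum generated alongside
// this model; the generic arguments are not visible in this rendering of the diff.
internal static class ModalitiesSketch
{
    public static void TextOnly(global::G.RealtimeSession session)
    {
        session.Modalities = new global::System.Collections.Generic.List<global::G.RealtimeSessionModalitie>
        {
            global::G.RealtimeSessionModalitie.Text,
        };
    }
}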
+ [global::System.Text.Json.Serialization.JsonPropertyName("modalities")] + public global::System.Collections.Generic.IList? Modalities { get; set; } + + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + ///
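// Illustrative sketch, not generator output: populating the instruction-related fields
// through the object initializer and the parameterless constructor defined later in this
// class; the instruction text and tool-choice value are assumptions for the example.
internal static class RealtimeSessionInstructionsSketch
{
    public static global::G.RealtimeSession Configure()
    {
        return new global::G.RealtimeSession
        {
            Instructions = "Answer succinctly and keep a friendly tone.",
            Temperature = 0.8,
            ToolChoice = "auto",
        };
    }
}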
+ [global::System.Text.Json.Serialization.JsonPropertyName("instructions")] + public string? Instructions { get; set; } + + /// + /// The voice the model uses to respond. Current voice options are `ash`,
+ /// `ballad`, `coral`, `sage`, and `verse`.
+ /// Also supported but not recommended are `alloy`, `echo`, and `shimmer`.
+ /// These older voices are less expressive.
+ /// Voice cannot be changed during the session once the model has
+ /// responded with audio at least once. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("voice")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeSessionVoiceJsonConverter))] + public global::G.RealtimeSessionVoice? Voice { get; set; } + + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_format")] + public string? InputAudioFormat { get; set; } + + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_audio_format")] + public string? OutputAudioFormat { get; set; } + + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn it off after it has been enabled. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + ///
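// Illustrative sketch, not generator output: enabling Whisper-based transcription by
// assigning the companion model generated further down; "whisper-1" follows the summary
// above, which documents it as the only supported transcription model today.
internal static class InputAudioTranscriptionSketch
{
    public static void EnableTranscription(global::G.RealtimeSession session)
    {
        session.InputAudioTranscription = new global::G.RealtimeSessionInputAudioTranscription("whisper-1");
    }
}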
+ [global::System.Text.Json.Serialization.JsonPropertyName("input_audio_transcription")] + public global::G.RealtimeSessionInputAudioTranscription? InputAudioTranscription { get; set; } + + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + ///
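// Illustrative sketch, not generator output: configuring server VAD with the generated
// RealtimeSessionTurnDetection constructor; the numeric values simply restate the defaults
// documented in that model (threshold 0.5, 300 ms prefix padding, 500 ms of silence).
internal static class TurnDetectionSketch
{
    public static void EnableServerVad(global::G.RealtimeSession session)
    {
        session.TurnDetection = new global::G.RealtimeSessionTurnDetection(
            type: "server_vad",
            threshold: 0.5,
            prefixPaddingMs: 300,
            silenceDurationMs: 500);
    }
}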
+ [global::System.Text.Json.Serialization.JsonPropertyName("turn_detection")] + public global::G.RealtimeSessionTurnDetection? TurnDetection { get; set; } + + /// + /// Tools (functions) available to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("tools")] + public global::System.Collections.Generic.IList? Tools { get; set; } + + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("tool_choice")] + public string? ToolChoice { get; set; } + + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("temperature")] + public double? Temperature { get; set; } + + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("max_response_output_tokens")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.OneOfJsonConverter))] + public global::G.OneOf? MaxResponseOutputTokens { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + /// + /// + /// The default system instructions (i.e. system message) prepended to model
+ /// calls. This field allows the client to guide the model on desired
+ /// responses. The model can be instructed on response content and format
+ /// (e.g. "be extremely succinct", "act friendly", "here are examples of good
+ /// responses") and on audio behavior (e.g. "talk quickly", "inject emotion
+ /// into your voice", "laugh frequently"). The instructions are not guaranteed
+ /// to be followed by the model, but they provide guidance to the model on the
+ /// desired behavior.
+ /// Note that the server sets default instructions which will be used if this
+ /// field is not set and are visible in the `session.created` event at the
+ /// start of the session. + /// + /// + /// The voice the model uses to respond. Current voice options are `ash`,
+ /// `ballad`, `coral`, `sage`, and `verse`.
+ /// Also supported but not recommended are `alloy`, `echo`, and `shimmer`.
+ /// These older voices are less expressive.
+ /// Voice cannot be changed during the session once the model has
+ /// responded with audio at least once. + /// + /// + /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + /// + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn it off after it has been enabled. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + /// + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + /// + /// + /// Tools (functions) available to the model. + /// + /// + /// How the model chooses tools. Options are `auto`, `none`, `required`, or
+ /// specify a function. + /// + /// + /// Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + /// + /// + /// Maximum number of output tokens for a single assistant response,
+ /// inclusive of tool calls. Provide an integer between 1 and 4096 to
+ /// limit output tokens, or `inf` for the maximum available tokens for a
+ /// given model. Defaults to `inf`. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSession( + global::System.Collections.Generic.IList? modalities, + string? instructions, + global::G.RealtimeSessionVoice? voice, + string? inputAudioFormat, + string? outputAudioFormat, + global::G.RealtimeSessionInputAudioTranscription? inputAudioTranscription, + global::G.RealtimeSessionTurnDetection? turnDetection, + global::System.Collections.Generic.IList? tools, + string? toolChoice, + double? temperature, + global::G.OneOf? maxResponseOutputTokens) + { + this.Modalities = modalities; + this.Instructions = instructions; + this.Voice = voice; + this.InputAudioFormat = inputAudioFormat; + this.OutputAudioFormat = outputAudioFormat; + this.InputAudioTranscription = inputAudioTranscription; + this.TurnDetection = turnDetection; + this.Tools = tools; + this.ToolChoice = toolChoice; + this.Temperature = temperature; + this.MaxResponseOutputTokens = maxResponseOutputTokens; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSession() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionInputAudioTranscription.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionInputAudioTranscription.Json.g.verified.cs new file mode 100644 index 0000000000..4e173731be --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionInputAudioTranscription.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeSessionInputAudioTranscription.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionInputAudioTranscription + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeSessionInputAudioTranscription? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeSessionInputAudioTranscription), + jsonSerializerContext) as global::G.RealtimeSessionInputAudioTranscription; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionInputAudioTranscription? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeSessionInputAudioTranscription), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeSessionInputAudioTranscription; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionInputAudioTranscription.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionInputAudioTranscription.g.verified.cs new file mode 100644 index 0000000000..e3e23c4425 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionInputAudioTranscription.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: G.Models.RealtimeSessionInputAudioTranscription.g.cs + +#nullable enable + +namespace G +{ + /// + /// Configuration for input audio transcription, defaults to off and can be
+ /// set to `null` to turn it off after it has been enabled. Input audio transcription is not native
+ /// to the model, since the model consumes audio directly. Transcription runs
+ /// asynchronously through Whisper and should be treated as rough guidance
+ /// rather than the representation understood by the model. + ///
+ public sealed partial class RealtimeSessionInputAudioTranscription + { + /// + /// The model to use for transcription, `whisper-1` is the only currently
+ /// supported model. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The model to use for transcription, `whisper-1` is the only currently
+ /// supported model. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionInputAudioTranscription( + string? model) + { + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionInputAudioTranscription() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionMaxResponseOutputTokens.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionMaxResponseOutputTokens.g.verified.cs new file mode 100644 index 0000000000..d21ffbf5b6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionMaxResponseOutputTokens.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeSessionMaxResponseOutputTokens.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum RealtimeSessionMaxResponseOutputTokens + { + /// + /// + /// + Inf, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionMaxResponseOutputTokensExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionMaxResponseOutputTokens value) + { + return value switch + { + RealtimeSessionMaxResponseOutputTokens.Inf => "inf", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionMaxResponseOutputTokens? ToEnum(string value) + { + return value switch + { + "inf" => RealtimeSessionMaxResponseOutputTokens.Inf, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalitie.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalitie.g.verified.cs new file mode 100644 index 0000000000..47355d9d34 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalitie.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.RealtimeSessionModalitie.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum RealtimeSessionModalitie + { + /// + /// + /// + Text, + /// + /// + /// + Audio, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionModalitieExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionModalitie value) + { + return value switch + { + RealtimeSessionModalitie.Text => "text", + RealtimeSessionModalitie.Audio => "audio", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionModalitie? 
ToEnum(string value) + { + return value switch + { + "text" => RealtimeSessionModalitie.Text, + "audio" => RealtimeSessionModalitie.Audio, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalities.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalities.Json.g.verified.cs new file mode 100644 index 0000000000..98bb470471 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalities.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeSessionModalities.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionModalities + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeSessionModalities? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeSessionModalities), + jsonSerializerContext) as global::G.RealtimeSessionModalities; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionModalities? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeSessionModalities), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeSessionModalities; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalities.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalities.g.verified.cs new file mode 100644 index 0000000000..71f49be53c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionModalities.g.verified.cs @@ -0,0 +1,29 @@ +//HintName: G.Models.RealtimeSessionModalities.g.cs + +#nullable enable + +namespace G +{ + /// + /// The set of modalities the model can respond with. To disable audio,
+ /// set this to ["text"]. + ///
+ public sealed partial class RealtimeSessionModalities + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionModalities( + ) + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTool.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTool.Json.g.verified.cs new file mode 100644 index 0000000000..55252b2eab --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTool.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeSessionTool.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionTool + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeSessionTool? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeSessionTool), + jsonSerializerContext) as global::G.RealtimeSessionTool; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionTool? 
FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeSessionTool), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeSessionTool; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTool.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTool.g.verified.cs new file mode 100644 index 0000000000..e3d3937f3c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTool.g.verified.cs @@ -0,0 +1,82 @@ +//HintName: G.Models.RealtimeSessionTool.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class RealtimeSessionTool + { + /// + /// The type of the tool, i.e. `function`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RealtimeSessionToolTypeJsonConverter))] + public global::G.RealtimeSessionToolType? Type { get; set; } + + /// + /// The name of the function. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// The description of the function, including guidance on when and how
+ /// to call it, and guidance about what to tell the user when calling
+ /// (if anything). + ///
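// Illustrative sketch, not generator output: registering a function tool with the generated
// constructor. The tool name, description, and JSON Schema object are assumptions for the
// example; Parameters is typed as object?, so an anonymous object serializes as plain JSON.
internal static class FunctionToolSketch
{
    public static global::G.RealtimeSessionTool BuildWeatherTool()
    {
        return new global::G.RealtimeSessionTool(
            global::G.RealtimeSessionToolType.Function,
            "get_weather",
            "Returns the current weather for a city; tell the user you are looking it up.",
            new { type = "object", properties = new { city = new { type = "string" } }, required = new[] { "city" } });
    }
}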
+ [global::System.Text.Json.Serialization.JsonPropertyName("description")] + public string? Description { get; set; } + + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("parameters")] + public object? Parameters { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the tool, i.e. `function`. + /// + /// + /// The name of the function. + /// + /// + /// The description of the function, including guidance on when and how
+ /// to call it, and guidance about what to tell the user when calling
+ /// (if anything). + /// + /// + /// Parameters of the function in JSON Schema. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionTool( + global::G.RealtimeSessionToolType? type, + string? name, + string? description, + object? parameters) + { + this.Type = type; + this.Name = name; + this.Description = description; + this.Parameters = parameters; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionTool() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolParameters.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolParameters.Json.g.verified.cs new file mode 100644 index 0000000000..b68c56b5f9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolParameters.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeSessionToolParameters.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionToolParameters + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeSessionToolParameters? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeSessionToolParameters), + jsonSerializerContext) as global::G.RealtimeSessionToolParameters; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionToolParameters? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeSessionToolParameters), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeSessionToolParameters; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolParameters.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolParameters.g.verified.cs new file mode 100644 index 0000000000..d8392c5860 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolParameters.g.verified.cs @@ -0,0 +1,28 @@ +//HintName: G.Models.RealtimeSessionToolParameters.g.cs + +#nullable enable + +namespace G +{ + /// + /// Parameters of the function in JSON Schema. + /// + public sealed partial class RealtimeSessionToolParameters + { + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. 
+ /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionToolParameters( + ) + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolType.g.verified.cs new file mode 100644 index 0000000000..6344db341b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionToolType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RealtimeSessionToolType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the tool, i.e. `function`. + /// + public enum RealtimeSessionToolType + { + /// + /// + /// + Function, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionToolTypeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionToolType value) + { + return value switch + { + RealtimeSessionToolType.Function => "function", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionToolType? ToEnum(string value) + { + return value switch + { + "function" => RealtimeSessionToolType.Function, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTurnDetection.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTurnDetection.Json.g.verified.cs new file mode 100644 index 0000000000..6abe1465e3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTurnDetection.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeSessionTurnDetection.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RealtimeSessionTurnDetection + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RealtimeSessionTurnDetection? 
FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RealtimeSessionTurnDetection), + jsonSerializerContext) as global::G.RealtimeSessionTurnDetection; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RealtimeSessionTurnDetection? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RealtimeSessionTurnDetection), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RealtimeSessionTurnDetection; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTurnDetection.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTurnDetection.g.verified.cs new file mode 100644 index 0000000000..d8308ff5b8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionTurnDetection.g.verified.cs @@ -0,0 +1,89 @@ +//HintName: G.Models.RealtimeSessionTurnDetection.g.cs + +#nullable enable + +namespace G +{ + /// + /// Configuration for turn detection. Can be set to `null` to turn off. Server
+ /// VAD means that the model will detect the start and end of speech based on
+ /// audio volume and respond at the end of user speech. + ///
+ public sealed partial class RealtimeSessionTurnDetection + { + /// + /// Type of turn detection, only `server_vad` is currently supported. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + public string? Type { get; set; } + + /// + /// Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A
+ /// higher threshold will require louder audio to activate the model, and
+ /// thus might perform better in noisy environments. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("threshold")] + public double? Threshold { get; set; } + + /// + /// Amount of audio to include before the VAD detected speech (in
+ /// milliseconds). Defaults to 300ms. + ///
+ [global::System.Text.Json.Serialization.JsonPropertyName("prefix_padding_ms")] + public int? PrefixPaddingMs { get; set; } + + /// + /// Duration of silence to detect speech stop (in milliseconds). Defaults
+ /// to 500ms. With shorter values the model will respond more quickly,
+ /// but may jump in on short pauses from the user. + ///
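(A minimal usage sketch, not part of the verified snapshot: it assumes only the generated `G.RealtimeSessionTurnDetection` constructor and `ToJson` helper shown in this diff; the VAD values are illustrative.)

using System;

// Configure server VAD turn detection and serialize it with the generated helper.
var turnDetection = new G.RealtimeSessionTurnDetection(
    type: "server_vad",       // only `server_vad` is currently supported
    threshold: 0.6,           // stricter than the documented 0.5 default, for noisier rooms
    prefixPaddingMs: 300,     // documented default
    silenceDurationMs: 500);  // documented default

Console.WriteLine(turnDetection.ToJson());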
+ [global::System.Text.Json.Serialization.JsonPropertyName("silence_duration_ms")] + public int? SilenceDurationMs { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// Type of turn detection, only `server_vad` is currently supported. + /// + /// + /// Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A
+ /// higher threshold will require louder audio to activate the model, and
+ /// thus might perform better in noisy environments. + /// + /// + /// Amount of audio to include before the VAD detected speech (in
+ /// milliseconds). Defaults to 300ms. + /// + /// + /// Duration of silence to detect speech stop (in milliseconds). Defaults
+ /// to 500ms. With shorter values the model will respond more quickly,
+ /// but may jump in on short pauses from the user. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RealtimeSessionTurnDetection( + string? type, + double? threshold, + int? prefixPaddingMs, + int? silenceDurationMs) + { + this.Type = type; + this.Threshold = threshold; + this.PrefixPaddingMs = prefixPaddingMs; + this.SilenceDurationMs = silenceDurationMs; + } + + /// + /// Initializes a new instance of the class. + /// + public RealtimeSessionTurnDetection() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionVoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionVoice.g.verified.cs new file mode 100644 index 0000000000..34ff865f7d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RealtimeSessionVoice.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RealtimeSessionVoice.g.cs + +#nullable enable + +namespace G +{ + /// + /// The voice the model uses to respond. Current voice options are `ash`,
+ /// `ballad`, `coral`, `sage`, and `verse`.
+ /// Also supported but not recommended are `alloy`, `echo`, and `shimmer`.
+ /// These older voices are less expressive.
+ /// Voice cannot be changed during the session once the model has
+ /// responded with audio at least once. + ///
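(A small round-trip sketch, not part of the verified snapshot: it relies on the `ToValueString`/`ToEnum` extensions defined later in this same generated file, which map between the C# enum members and the wire values without reflection.)

using System;
using G;

// Enum -> wire value ("verse"), via the generated extension method.
string wire = RealtimeSessionVoice.Verse.ToValueString();

// Wire value -> enum; unrecognised strings come back as null instead of throwing.
RealtimeSessionVoice? parsed = RealtimeSessionVoiceExtensions.ToEnum(wire);

Console.WriteLine($"{wire} -> {parsed}");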
+ public enum RealtimeSessionVoice + { + /// + /// + /// + Alloy, + /// + /// + /// + Ash, + /// + /// + /// + Ballad, + /// + /// + /// + Coral, + /// + /// + /// + Echo, + /// + /// + /// + Sage, + /// + /// + /// + Shimmer, + /// + /// + /// + Verse, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RealtimeSessionVoiceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RealtimeSessionVoice value) + { + return value switch + { + RealtimeSessionVoice.Alloy => "alloy", + RealtimeSessionVoice.Ash => "ash", + RealtimeSessionVoice.Ballad => "ballad", + RealtimeSessionVoice.Coral => "coral", + RealtimeSessionVoice.Echo => "echo", + RealtimeSessionVoice.Sage => "sage", + RealtimeSessionVoice.Shimmer => "shimmer", + RealtimeSessionVoice.Verse => "verse", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RealtimeSessionVoice? ToEnum(string value) + { + return value switch + { + "alloy" => RealtimeSessionVoice.Alloy, + "ash" => RealtimeSessionVoice.Ash, + "ballad" => RealtimeSessionVoice.Ballad, + "coral" => RealtimeSessionVoice.Coral, + "echo" => RealtimeSessionVoice.Echo, + "sage" => RealtimeSessionVoice.Sage, + "shimmer" => RealtimeSessionVoice.Shimmer, + "verse" => RealtimeSessionVoice.Verse, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResponseFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResponseFormat.g.verified.cs index 50821a2646..68abbc4443 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResponseFormat.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResponseFormat.g.verified.cs @@ -6,9 +6,9 @@ namespace G { /// - /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
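(A sketch of the JSON-mode caveat described above, not part of the snapshot: it builds a plain chat-completions request body with System.Text.Json, sets `response_format` to `json_object`, and includes the required "respond in JSON" instruction in the system message. No generated client types are assumed, and the model name is only an example.)

using System;
using System.Text.Json;

var payload = new
{
    model = "gpt-4o-mini",
    messages = new object[]
    {
        // JSON mode requires an explicit instruction to produce JSON; without it the
        // model may emit whitespace until it reaches the token limit (see note above).
        new { role = "system", content = "You are a helpful assistant. Reply with a single JSON object." },
        new { role = "user", content = "List three primary colors." },
    },
    response_format = new { type = "json_object" },
};

Console.WriteLine(JsonSerializer.Serialize(payload, new JsonSerializerOptions { WriteIndented = true }));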
public readonly partial struct ResponseFormat : global::System.IEquatable diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResultItem.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResultItem.Json.g.verified.cs new file mode 100644 index 0000000000..87435971fb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResultItem.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.ResultItem.Json.g.cs +#nullable enable + +namespace G +{ + public readonly partial struct ResultItem + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.ResultItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.ResultItem), + jsonSerializerContext) as global::G.ResultItem?; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.ResultItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.ResultItem), + jsonSerializerContext).ConfigureAwait(false)) as global::G.ResultItem?; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResultItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResultItem.g.verified.cs new file mode 100644 index 0000000000..6d9bc3edd0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ResultItem.g.verified.cs @@ -0,0 +1,580 @@ +//HintName: G.Models.ResultItem.g.cs +#pragma warning disable CS0618 // Type or member is obsolete + +#nullable enable + +namespace G +{ + /// + /// + /// + public readonly partial struct ResultItem : global::System.IEquatable + { + /// + /// + /// + public global::G.UsageTimeBucketResultItemDiscriminatorObject? Object { get; } + + /// + /// The aggregated completions usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageCompletionsResult? OrganizationUsageCompletionsResult { get; init; } +#else + public global::G.UsageCompletionsResult? OrganizationUsageCompletionsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageCompletionsResult))] +#endif + public bool IsOrganizationUsageCompletionsResult => OrganizationUsageCompletionsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageCompletionsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageCompletionsResult?(ResultItem @this) => @this.OrganizationUsageCompletionsResult; + + /// + /// + /// + public ResultItem(global::G.UsageCompletionsResult? value) + { + OrganizationUsageCompletionsResult = value; + } + + /// + /// The aggregated embeddings usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageEmbeddingsResult? OrganizationUsageEmbeddingsResult { get; init; } +#else + public global::G.UsageEmbeddingsResult? 
OrganizationUsageEmbeddingsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageEmbeddingsResult))] +#endif + public bool IsOrganizationUsageEmbeddingsResult => OrganizationUsageEmbeddingsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageEmbeddingsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageEmbeddingsResult?(ResultItem @this) => @this.OrganizationUsageEmbeddingsResult; + + /// + /// + /// + public ResultItem(global::G.UsageEmbeddingsResult? value) + { + OrganizationUsageEmbeddingsResult = value; + } + + /// + /// The aggregated moderations usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageModerationsResult? OrganizationUsageModerationsResult { get; init; } +#else + public global::G.UsageModerationsResult? OrganizationUsageModerationsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageModerationsResult))] +#endif + public bool IsOrganizationUsageModerationsResult => OrganizationUsageModerationsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageModerationsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageModerationsResult?(ResultItem @this) => @this.OrganizationUsageModerationsResult; + + /// + /// + /// + public ResultItem(global::G.UsageModerationsResult? value) + { + OrganizationUsageModerationsResult = value; + } + + /// + /// The aggregated images usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageImagesResult? OrganizationUsageImagesResult { get; init; } +#else + public global::G.UsageImagesResult? OrganizationUsageImagesResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageImagesResult))] +#endif + public bool IsOrganizationUsageImagesResult => OrganizationUsageImagesResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageImagesResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageImagesResult?(ResultItem @this) => @this.OrganizationUsageImagesResult; + + /// + /// + /// + public ResultItem(global::G.UsageImagesResult? value) + { + OrganizationUsageImagesResult = value; + } + + /// + /// The aggregated audio speeches usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageAudioSpeechesResult? OrganizationUsageAudioSpeechesResult { get; init; } +#else + public global::G.UsageAudioSpeechesResult? 
OrganizationUsageAudioSpeechesResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageAudioSpeechesResult))] +#endif + public bool IsOrganizationUsageAudioSpeechesResult => OrganizationUsageAudioSpeechesResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageAudioSpeechesResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageAudioSpeechesResult?(ResultItem @this) => @this.OrganizationUsageAudioSpeechesResult; + + /// + /// + /// + public ResultItem(global::G.UsageAudioSpeechesResult? value) + { + OrganizationUsageAudioSpeechesResult = value; + } + + /// + /// The aggregated audio transcriptions usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageAudioTranscriptionsResult? OrganizationUsageAudioTranscriptionsResult { get; init; } +#else + public global::G.UsageAudioTranscriptionsResult? OrganizationUsageAudioTranscriptionsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageAudioTranscriptionsResult))] +#endif + public bool IsOrganizationUsageAudioTranscriptionsResult => OrganizationUsageAudioTranscriptionsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageAudioTranscriptionsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageAudioTranscriptionsResult?(ResultItem @this) => @this.OrganizationUsageAudioTranscriptionsResult; + + /// + /// + /// + public ResultItem(global::G.UsageAudioTranscriptionsResult? value) + { + OrganizationUsageAudioTranscriptionsResult = value; + } + + /// + /// The aggregated vector stores usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageVectorStoresResult? OrganizationUsageVectorStoresResult { get; init; } +#else + public global::G.UsageVectorStoresResult? OrganizationUsageVectorStoresResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageVectorStoresResult))] +#endif + public bool IsOrganizationUsageVectorStoresResult => OrganizationUsageVectorStoresResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageVectorStoresResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageVectorStoresResult?(ResultItem @this) => @this.OrganizationUsageVectorStoresResult; + + /// + /// + /// + public ResultItem(global::G.UsageVectorStoresResult? value) + { + OrganizationUsageVectorStoresResult = value; + } + + /// + /// The aggregated code interpreter sessions usage details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.UsageCodeInterpreterSessionsResult? OrganizationUsageCodeInterpreterSessionsResult { get; init; } +#else + public global::G.UsageCodeInterpreterSessionsResult? 
OrganizationUsageCodeInterpreterSessionsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationUsageCodeInterpreterSessionsResult))] +#endif + public bool IsOrganizationUsageCodeInterpreterSessionsResult => OrganizationUsageCodeInterpreterSessionsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.UsageCodeInterpreterSessionsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.UsageCodeInterpreterSessionsResult?(ResultItem @this) => @this.OrganizationUsageCodeInterpreterSessionsResult; + + /// + /// + /// + public ResultItem(global::G.UsageCodeInterpreterSessionsResult? value) + { + OrganizationUsageCodeInterpreterSessionsResult = value; + } + + /// + /// The aggregated costs details of the specific time bucket. + /// +#if NET6_0_OR_GREATER + public global::G.CostsResult? OrganizationCostsResult { get; init; } +#else + public global::G.CostsResult? OrganizationCostsResult { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(OrganizationCostsResult))] +#endif + public bool IsOrganizationCostsResult => OrganizationCostsResult != null; + + /// + /// + /// + public static implicit operator ResultItem(global::G.CostsResult value) => new ResultItem(value); + + /// + /// + /// + public static implicit operator global::G.CostsResult?(ResultItem @this) => @this.OrganizationCostsResult; + + /// + /// + /// + public ResultItem(global::G.CostsResult? value) + { + OrganizationCostsResult = value; + } + + /// + /// + /// + public ResultItem( + global::G.UsageTimeBucketResultItemDiscriminatorObject? @object, + global::G.UsageCompletionsResult? organizationUsageCompletionsResult, + global::G.UsageEmbeddingsResult? organizationUsageEmbeddingsResult, + global::G.UsageModerationsResult? organizationUsageModerationsResult, + global::G.UsageImagesResult? organizationUsageImagesResult, + global::G.UsageAudioSpeechesResult? organizationUsageAudioSpeechesResult, + global::G.UsageAudioTranscriptionsResult? organizationUsageAudioTranscriptionsResult, + global::G.UsageVectorStoresResult? organizationUsageVectorStoresResult, + global::G.UsageCodeInterpreterSessionsResult? organizationUsageCodeInterpreterSessionsResult, + global::G.CostsResult? organizationCostsResult + ) + { + Object = @object; + + OrganizationUsageCompletionsResult = organizationUsageCompletionsResult; + OrganizationUsageEmbeddingsResult = organizationUsageEmbeddingsResult; + OrganizationUsageModerationsResult = organizationUsageModerationsResult; + OrganizationUsageImagesResult = organizationUsageImagesResult; + OrganizationUsageAudioSpeechesResult = organizationUsageAudioSpeechesResult; + OrganizationUsageAudioTranscriptionsResult = organizationUsageAudioTranscriptionsResult; + OrganizationUsageVectorStoresResult = organizationUsageVectorStoresResult; + OrganizationUsageCodeInterpreterSessionsResult = organizationUsageCodeInterpreterSessionsResult; + OrganizationCostsResult = organizationCostsResult; + } + + /// + /// + /// + public object? Object1 => + OrganizationCostsResult as object ?? + OrganizationUsageCodeInterpreterSessionsResult as object ?? + OrganizationUsageVectorStoresResult as object ?? + OrganizationUsageAudioTranscriptionsResult as object ?? + OrganizationUsageAudioSpeechesResult as object ?? + OrganizationUsageImagesResult as object ?? 
+ OrganizationUsageModerationsResult as object ?? + OrganizationUsageEmbeddingsResult as object ?? + OrganizationUsageCompletionsResult as object + ; + + /// + /// + /// + public bool Validate() + { + return IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && IsOrganizationUsageCodeInterpreterSessionsResult && !IsOrganizationCostsResult || !IsOrganizationUsageCompletionsResult && !IsOrganizationUsageEmbeddingsResult && !IsOrganizationUsageModerationsResult && !IsOrganizationUsageImagesResult && !IsOrganizationUsageAudioSpeechesResult && !IsOrganizationUsageAudioTranscriptionsResult && !IsOrganizationUsageVectorStoresResult && !IsOrganizationUsageCodeInterpreterSessionsResult && 
IsOrganizationCostsResult; + } + + /// + /// + /// + public TResult? Match( + global::System.Func? organizationUsageCompletionsResult = null, + global::System.Func? organizationUsageEmbeddingsResult = null, + global::System.Func? organizationUsageModerationsResult = null, + global::System.Func? organizationUsageImagesResult = null, + global::System.Func? organizationUsageAudioSpeechesResult = null, + global::System.Func? organizationUsageAudioTranscriptionsResult = null, + global::System.Func? organizationUsageVectorStoresResult = null, + global::System.Func? organizationUsageCodeInterpreterSessionsResult = null, + global::System.Func? organizationCostsResult = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsOrganizationUsageCompletionsResult && organizationUsageCompletionsResult != null) + { + return organizationUsageCompletionsResult(OrganizationUsageCompletionsResult!); + } + else if (IsOrganizationUsageEmbeddingsResult && organizationUsageEmbeddingsResult != null) + { + return organizationUsageEmbeddingsResult(OrganizationUsageEmbeddingsResult!); + } + else if (IsOrganizationUsageModerationsResult && organizationUsageModerationsResult != null) + { + return organizationUsageModerationsResult(OrganizationUsageModerationsResult!); + } + else if (IsOrganizationUsageImagesResult && organizationUsageImagesResult != null) + { + return organizationUsageImagesResult(OrganizationUsageImagesResult!); + } + else if (IsOrganizationUsageAudioSpeechesResult && organizationUsageAudioSpeechesResult != null) + { + return organizationUsageAudioSpeechesResult(OrganizationUsageAudioSpeechesResult!); + } + else if (IsOrganizationUsageAudioTranscriptionsResult && organizationUsageAudioTranscriptionsResult != null) + { + return organizationUsageAudioTranscriptionsResult(OrganizationUsageAudioTranscriptionsResult!); + } + else if (IsOrganizationUsageVectorStoresResult && organizationUsageVectorStoresResult != null) + { + return organizationUsageVectorStoresResult(OrganizationUsageVectorStoresResult!); + } + else if (IsOrganizationUsageCodeInterpreterSessionsResult && organizationUsageCodeInterpreterSessionsResult != null) + { + return organizationUsageCodeInterpreterSessionsResult(OrganizationUsageCodeInterpreterSessionsResult!); + } + else if (IsOrganizationCostsResult && organizationCostsResult != null) + { + return organizationCostsResult(OrganizationCostsResult!); + } + + return default(TResult); + } + + /// + /// + /// + public void Match( + global::System.Action? organizationUsageCompletionsResult = null, + global::System.Action? organizationUsageEmbeddingsResult = null, + global::System.Action? organizationUsageModerationsResult = null, + global::System.Action? organizationUsageImagesResult = null, + global::System.Action? organizationUsageAudioSpeechesResult = null, + global::System.Action? organizationUsageAudioTranscriptionsResult = null, + global::System.Action? organizationUsageVectorStoresResult = null, + global::System.Action? organizationUsageCodeInterpreterSessionsResult = null, + global::System.Action? 
organizationCostsResult = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsOrganizationUsageCompletionsResult) + { + organizationUsageCompletionsResult?.Invoke(OrganizationUsageCompletionsResult!); + } + else if (IsOrganizationUsageEmbeddingsResult) + { + organizationUsageEmbeddingsResult?.Invoke(OrganizationUsageEmbeddingsResult!); + } + else if (IsOrganizationUsageModerationsResult) + { + organizationUsageModerationsResult?.Invoke(OrganizationUsageModerationsResult!); + } + else if (IsOrganizationUsageImagesResult) + { + organizationUsageImagesResult?.Invoke(OrganizationUsageImagesResult!); + } + else if (IsOrganizationUsageAudioSpeechesResult) + { + organizationUsageAudioSpeechesResult?.Invoke(OrganizationUsageAudioSpeechesResult!); + } + else if (IsOrganizationUsageAudioTranscriptionsResult) + { + organizationUsageAudioTranscriptionsResult?.Invoke(OrganizationUsageAudioTranscriptionsResult!); + } + else if (IsOrganizationUsageVectorStoresResult) + { + organizationUsageVectorStoresResult?.Invoke(OrganizationUsageVectorStoresResult!); + } + else if (IsOrganizationUsageCodeInterpreterSessionsResult) + { + organizationUsageCodeInterpreterSessionsResult?.Invoke(OrganizationUsageCodeInterpreterSessionsResult!); + } + else if (IsOrganizationCostsResult) + { + organizationCostsResult?.Invoke(OrganizationCostsResult!); + } + } + + /// + /// + /// + public override int GetHashCode() + { + var fields = new object?[] + { + OrganizationUsageCompletionsResult, + typeof(global::G.UsageCompletionsResult), + OrganizationUsageEmbeddingsResult, + typeof(global::G.UsageEmbeddingsResult), + OrganizationUsageModerationsResult, + typeof(global::G.UsageModerationsResult), + OrganizationUsageImagesResult, + typeof(global::G.UsageImagesResult), + OrganizationUsageAudioSpeechesResult, + typeof(global::G.UsageAudioSpeechesResult), + OrganizationUsageAudioTranscriptionsResult, + typeof(global::G.UsageAudioTranscriptionsResult), + OrganizationUsageVectorStoresResult, + typeof(global::G.UsageVectorStoresResult), + OrganizationUsageCodeInterpreterSessionsResult, + typeof(global::G.UsageCodeInterpreterSessionsResult), + OrganizationCostsResult, + typeof(global::G.CostsResult), + }; + const int offset = unchecked((int)2166136261); + const int prime = 16777619; + static int HashCodeAggregator(int hashCode, object? value) => value == null + ? 
(hashCode ^ 0) * prime + : (hashCode ^ value.GetHashCode()) * prime; + + return global::System.Linq.Enumerable.Aggregate(fields, offset, HashCodeAggregator); + } + + /// + /// + /// + public bool Equals(ResultItem other) + { + return + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageCompletionsResult, other.OrganizationUsageCompletionsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageEmbeddingsResult, other.OrganizationUsageEmbeddingsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageModerationsResult, other.OrganizationUsageModerationsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageImagesResult, other.OrganizationUsageImagesResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageAudioSpeechesResult, other.OrganizationUsageAudioSpeechesResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageAudioTranscriptionsResult, other.OrganizationUsageAudioTranscriptionsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageVectorStoresResult, other.OrganizationUsageVectorStoresResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationUsageCodeInterpreterSessionsResult, other.OrganizationUsageCodeInterpreterSessionsResult) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(OrganizationCostsResult, other.OrganizationCostsResult) + ; + } + + /// + /// + /// + public static bool operator ==(ResultItem obj1, ResultItem obj2) + { + return global::System.Collections.Generic.EqualityComparer.Default.Equals(obj1, obj2); + } + + /// + /// + /// + public static bool operator !=(ResultItem obj1, ResultItem obj2) + { + return !(obj1 == obj2); + } + + /// + /// + /// + public override bool Equals(object? obj) + { + return obj is ResultItem o && Equals(o); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunObject.g.verified.cs index 37c25d39de..2436f967da 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunObject.g.verified.cs @@ -129,14 +129,15 @@ public sealed partial class RunObject public required string Instructions { get; set; } /// - /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.
+ /// Default Value: [] ///
[global::System.Text.Json.Serialization.JsonPropertyName("tools")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::System.Collections.Generic.IList Tools { get; set; } + public required global::System.Collections.Generic.IList Tools { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] [global::System.Text.Json.Serialization.JsonRequired] @@ -195,16 +196,18 @@ public sealed partial class RunObject public required global::G.AssistantsApiToolChoiceOption ToolChoice { get; set; } /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true ///
+ /// true [global::System.Text.Json.Serialization.JsonPropertyName("parallel_tool_calls")] [global::System.Text.Json.Serialization.JsonRequired] - public required bool? ParallelToolCalls { get; set; } + public required bool ParallelToolCalls { get; set; } = true; /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. ///
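(A sketch of the `json_schema` variant mentioned above, expressed as a raw `response_format` value; the nested `name`/`strict`/`schema` fields follow the Structured Outputs guide rather than this snapshot, so treat them as assumptions.)

using System;
using System.Text.Json;

var responseFormat = new
{
    type = "json_schema",
    json_schema = new
    {
        name = "color_list",   // assumed field name, per the Structured Outputs guide
        strict = true,         // assumed field name, per the Structured Outputs guide
        schema = new
        {
            type = "object",
            properties = new { colors = new { type = "array", items = new { type = "string" } } },
            required = new[] { "colors" },
            additionalProperties = false,
        },
    },
};

Console.WriteLine(JsonSerializer.Serialize(responseFormat));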
[global::System.Text.Json.Serialization.JsonPropertyName("response_format")] @@ -270,10 +273,11 @@ public sealed partial class RunObject /// The instructions that the [assistant](/docs/api-reference/assistants) used for this run. /// /// - /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + /// The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.
+ /// Default Value: [] /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// /// Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -301,12 +305,13 @@ public sealed partial class RunObject /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// - /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
+ /// Default Value: true /// /// - /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
- /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
- /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
+ /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).
+ /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] @@ -326,14 +331,14 @@ public RunObject( global::G.RunObjectIncompleteDetails? incompleteDetails, string model, string instructions, - global::System.Collections.Generic.IList tools, + global::System.Collections.Generic.IList tools, object? metadata, global::G.RunCompletionUsage? usage, int? maxPromptTokens, int? maxCompletionTokens, global::G.TruncationObject truncationStrategy, global::G.AssistantsApiToolChoiceOption toolChoice, - bool? parallelToolCalls, + bool parallelToolCalls, global::G.AssistantsApiResponseFormatOption responseFormat, global::G.RunObjectObject @object, double? temperature, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunObjectMetadata.g.verified.cs index 57afc61226..980f4d0ecd 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class RunObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDeltaStepDetailsToolCallsObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDeltaStepDetailsToolCallsObject.g.verified.cs index df74487752..3ba44f0bbd 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDeltaStepDetailsToolCallsObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDeltaStepDetailsToolCallsObject.g.verified.cs @@ -20,7 +20,7 @@ public sealed partial class RunStepDeltaStepDetailsToolCallsObject /// An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. ///
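(A sketch of consuming the `tool_calls` delta array documented above; it reads raw JSON with System.Text.Json rather than the generated item types, and the sample fragment is illustrative only.)

using System;
using System.Text.Json;

// Illustrative run-step delta fragment; each entry carries an index and one of the three
// documented tool types (`code_interpreter`, `file_search`, `function`).
const string json =
    """{"tool_calls":[{"index":0,"type":"function","function":{"name":"lookup","arguments":"{\"q\":\"hi\"}"}}]}""";

using var doc = JsonDocument.Parse(json);
foreach (var call in doc.RootElement.GetProperty("tool_calls").EnumerateArray())
{
    // Dispatch on the tool type; only `function` is handled in this sketch.
    if (call.GetProperty("type").GetString() == "function")
    {
        var fn = call.GetProperty("function");
        Console.WriteLine($"{fn.GetProperty("name").GetString()}({fn.GetProperty("arguments").GetString()})");
    }
}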
[global::System.Text.Json.Serialization.JsonPropertyName("tool_calls")] - public global::System.Collections.Generic.IList? ToolCalls { get; set; } + public global::System.Collections.Generic.IList? ToolCalls { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -40,7 +40,7 @@ public sealed partial class RunStepDeltaStepDetailsToolCallsObject [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RunStepDeltaStepDetailsToolCallsObject( global::G.RunStepDeltaStepDetailsToolCallsObjectType type, - global::System.Collections.Generic.IList? toolCalls) + global::System.Collections.Generic.IList? toolCalls) { this.Type = type; this.ToolCalls = toolCalls; diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchObject.g.verified.cs index cf4ef461c1..d37b98db63 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchObject.g.verified.cs @@ -28,7 +28,7 @@ public sealed partial class RunStepDetailsToolCallsFileSearchObject /// [global::System.Text.Json.Serialization.JsonPropertyName("file_search")] [global::System.Text.Json.Serialization.JsonRequired] - public required object FileSearch { get; set; } + public required global::G.RunStepDetailsToolCallsFileSearchObjectFileSearch FileSearch { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -51,7 +51,7 @@ public sealed partial class RunStepDetailsToolCallsFileSearchObject [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RunStepDetailsToolCallsFileSearchObject( string id, - object fileSearch, + global::G.RunStepDetailsToolCallsFileSearchObjectFileSearch fileSearch, global::G.RunStepDetailsToolCallsFileSearchObjectType type) { this.Id = id ?? throw new global::System.ArgumentNullException(nameof(id)); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchObjectFileSearch.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchObjectFileSearch.g.verified.cs index 8107b940fd..0117b21ecc 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchObjectFileSearch.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchObjectFileSearch.g.verified.cs @@ -9,6 +9,17 @@ namespace G /// public sealed partial class RunStepDetailsToolCallsFileSearchObjectFileSearch { + /// + /// The ranking options for the file search. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("ranking_options")] + public global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject? RankingOptions { get; set; } + + /// + /// The results of the file search. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("results")] + public global::System.Collections.Generic.IList? 
Results { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -19,9 +30,25 @@ public sealed partial class RunStepDetailsToolCallsFileSearchObjectFileSearch /// /// Initializes a new instance of the class. /// + /// + /// The ranking options for the file search. + /// + /// + /// The results of the file search. + /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RunStepDetailsToolCallsFileSearchObjectFileSearch( - ) + global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject? rankingOptions, + global::System.Collections.Generic.IList? results) + { + this.RankingOptions = rankingOptions; + this.Results = results; + } + + /// + /// Initializes a new instance of the class. + /// + public RunStepDetailsToolCallsFileSearchObjectFileSearch() { } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.Json.g.verified.cs new file mode 100644 index 0000000000..cb3d8e15a2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RunStepDetailsToolCallsFileSearchRankingOptionsObject + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject), + jsonSerializerContext) as global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObject; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.g.verified.cs new file mode 100644 index 0000000000..ad9590edfd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// The ranking options for the file search. + /// + public sealed partial class RunStepDetailsToolCallsFileSearchRankingOptionsObject + { + /// + /// The ranker used for the file search. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("ranker")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerJsonConverter))] + public global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker Ranker { get; set; } + + /// + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("score_threshold")] + [global::System.Text.Json.Serialization.JsonRequired] + public required double ScoreThreshold { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The ranker used for the file search. + /// + /// + /// The score threshold for the file search. All values must be a floating point number between 0 and 1. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RunStepDetailsToolCallsFileSearchRankingOptionsObject( + double scoreThreshold, + global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker ranker) + { + this.ScoreThreshold = scoreThreshold; + this.Ranker = ranker; + } + + /// + /// Initializes a new instance of the class. + /// + public RunStepDetailsToolCallsFileSearchRankingOptionsObject() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs new file mode 100644 index 0000000000..9c8d14e6fb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.cs + +#nullable enable + +namespace G +{ + /// + /// The ranker used for the file search. + /// + public enum RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker + { + /// + /// + /// + Default20240821, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker value) + { + return value switch + { + RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.Default20240821 => "default_2024_08_21", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker? 
ToEnum(string value) + { + return value switch + { + "default_2024_08_21" => RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.Default20240821, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.Json.g.verified.cs new file mode 100644 index 0000000000..b644ec8131 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObject.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RunStepDetailsToolCallsFileSearchResultObject + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.RunStepDetailsToolCallsFileSearchResultObject? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RunStepDetailsToolCallsFileSearchResultObject), + jsonSerializerContext) as global::G.RunStepDetailsToolCallsFileSearchResultObject; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RunStepDetailsToolCallsFileSearchResultObject? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RunStepDetailsToolCallsFileSearchResultObject), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RunStepDetailsToolCallsFileSearchResultObject; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.g.verified.cs new file mode 100644 index 0000000000..0409d10777 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObject.g.verified.cs @@ -0,0 +1,80 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// A result instance of the file search. + /// + public sealed partial class RunStepDetailsToolCallsFileSearchResultObject + { + /// + /// The ID of the file that result was found in. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("file_id")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string FileId { get; set; } + + /// + /// The name of the file that result was found in. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("file_name")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string FileName { get; set; } + + /// + /// The score of the result. All values must be a floating point number between 0 and 1. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("score")] + [global::System.Text.Json.Serialization.JsonRequired] + public required double Score { get; set; } + + /// + /// The content of the result that was found. The content is only included if requested via the include query parameter. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("content")] + public global::System.Collections.Generic.IList? 
Content { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The ID of the file that result was found in. + /// + /// + /// The name of the file that result was found in. + /// + /// + /// The score of the result. All values must be a floating point number between 0 and 1. + /// + /// + /// The content of the result that was found. The content is only included if requested via the include query parameter. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RunStepDetailsToolCallsFileSearchResultObject( + string fileId, + string fileName, + double score, + global::System.Collections.Generic.IList? content) + { + this.FileId = fileId ?? throw new global::System.ArgumentNullException(nameof(fileId)); + this.FileName = fileName ?? throw new global::System.ArgumentNullException(nameof(fileName)); + this.Score = score; + this.Content = content; + } + + /// + /// Initializes a new instance of the class. + /// + public RunStepDetailsToolCallsFileSearchResultObject() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.Json.g.verified.cs new file mode 100644 index 0000000000..af076d2dd1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class RunStepDetailsToolCallsFileSearchResultObjectContentItem + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItem? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItem), + jsonSerializerContext) as global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItem; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItem? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItem), + jsonSerializerContext).ConfigureAwait(false)) as global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItem; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.g.verified.cs new file mode 100644 index 0000000000..0b6f8a4a1e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.g.verified.cs @@ -0,0 +1,56 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class RunStepDetailsToolCallsFileSearchResultObjectContentItem + { + /// + /// The type of the content. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("type")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeJsonConverter))] + public global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType? Type { get; set; } + + /// + /// The text content of the file. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("text")] + public string? Text { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// The type of the content. + /// + /// + /// The text content of the file. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public RunStepDetailsToolCallsFileSearchResultObjectContentItem( + global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType? type, + string? text) + { + this.Type = type; + this.Text = text; + } + + /// + /// Initializes a new instance of the class. + /// + public RunStepDetailsToolCallsFileSearchResultObjectContentItem() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs new file mode 100644 index 0000000000..20631107f6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.cs + +#nullable enable + +namespace G +{ + /// + /// The type of the content. + /// + public enum RunStepDetailsToolCallsFileSearchResultObjectContentItemType + { + /// + /// + /// + Text, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this RunStepDetailsToolCallsFileSearchResultObjectContentItemType value) + { + return value switch + { + RunStepDetailsToolCallsFileSearchResultObjectContentItemType.Text => "text", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static RunStepDetailsToolCallsFileSearchResultObjectContentItemType? ToEnum(string value) + { + return value switch + { + "text" => RunStepDetailsToolCallsFileSearchResultObjectContentItemType.Text, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsObject.g.verified.cs index e275cc0214..be8d298634 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepDetailsToolCallsObject.g.verified.cs @@ -21,7 +21,7 @@ public sealed partial class RunStepDetailsToolCallsObject /// [global::System.Text.Json.Serialization.JsonPropertyName("tool_calls")] [global::System.Text.Json.Serialization.JsonRequired] - public required global::System.Collections.Generic.IList ToolCalls { get; set; } + public required global::System.Collections.Generic.IList ToolCalls { get; set; } /// /// Additional properties that are not explicitly defined in the schema @@ -40,7 +40,7 @@ public sealed partial class RunStepDetailsToolCallsObject /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public RunStepDetailsToolCallsObject( - global::System.Collections.Generic.IList toolCalls, + global::System.Collections.Generic.IList toolCalls, global::G.RunStepDetailsToolCallsObjectType type) { this.ToolCalls = toolCalls ?? throw new global::System.ArgumentNullException(nameof(toolCalls)); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepObject.g.verified.cs index d933b1d063..7a431c4c72 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepObject.g.verified.cs @@ -116,7 +116,7 @@ public sealed partial class RunStepObject public required global::System.DateTimeOffset? CompletedAt { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] [global::System.Text.Json.Serialization.JsonRequired] @@ -181,7 +181,7 @@ public sealed partial class RunStepObject /// The Unix timestamp (in seconds) for when the run step completed. /// /// - /// Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// /// Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepObjectMetadata.g.verified.cs index a216aa2ff4..bd8b687226 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.RunStepObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class RunStepObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObject.g.verified.cs index 9ccf08450b..f41aa37624 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObject.g.verified.cs @@ -39,7 +39,7 @@ public sealed partial class ThreadObject public required global::G.ThreadObjectToolResources? ToolResources { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] [global::System.Text.Json.Serialization.JsonRequired] @@ -67,7 +67,7 @@ public sealed partial class ThreadObject /// A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
+ /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ThreadObject( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObjectMetadata.g.verified.cs index 0f6148a4d5..2c985052b2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class ThreadObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObjectToolResourcesCodeInterpreter.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObjectToolResourcesCodeInterpreter.g.verified.cs index 63d92eb641..03427398e4 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObjectToolResourcesCodeInterpreter.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ThreadObjectToolResourcesCodeInterpreter.g.verified.cs @@ -10,7 +10,8 @@ namespace G public sealed partial class ThreadObjectToolResourcesCodeInterpreter { /// - /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: []
 /// </summary>
 [global::System.Text.Json.Serialization.JsonPropertyName("file_ids")]
 public global::System.Collections.Generic.IList<string>? FileIds { get; set; }
@@ -25,7 +26,8 @@ public sealed partial class ThreadObjectToolResourcesCodeInterpreter
 /// Initializes a new instance of the <see cref="ThreadObjectToolResourcesCodeInterpreter" /> class.
 /// </summary>
 /// <param name="fileIds">
- /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ /// Default Value: [] /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public ThreadObjectToolResourcesCodeInterpreter( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolCallsItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolCallsItem.g.verified.cs index 1743c35d5d..88f8f3b4d0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolCallsItem.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolCallsItem.g.verified.cs @@ -13,15 +13,15 @@ namespace G /// /// /// - public global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? Type { get; } + public global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? Type { get; } /// /// Details of the Code Interpreter tool call the run step was involved in. /// #if NET6_0_OR_GREATER - public global::G.RunStepDetailsToolCallsCodeObject? CodeInterpreter { get; init; } + public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? CodeInterpreter { get; init; } #else - public global::G.RunStepDetailsToolCallsCodeObject? CodeInterpreter { get; } + public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? CodeInterpreter { get; } #endif /// @@ -35,17 +35,17 @@ namespace G /// /// /// - public static implicit operator ToolCallsItem(global::G.RunStepDetailsToolCallsCodeObject value) => new ToolCallsItem(value); + public static implicit operator ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsCodeObject value) => new ToolCallsItem(value); /// /// /// - public static implicit operator global::G.RunStepDetailsToolCallsCodeObject?(ToolCallsItem @this) => @this.CodeInterpreter; + public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsCodeObject?(ToolCallsItem @this) => @this.CodeInterpreter; /// /// /// - public ToolCallsItem(global::G.RunStepDetailsToolCallsCodeObject? value) + public ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsCodeObject? value) { CodeInterpreter = value; } @@ -54,9 +54,9 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsCodeObject? value) /// /// #if NET6_0_OR_GREATER - public global::G.RunStepDetailsToolCallsFileSearchObject? FileSearch { get; init; } + public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? FileSearch { get; init; } #else - public global::G.RunStepDetailsToolCallsFileSearchObject? FileSearch { get; } + public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? FileSearch { get; } #endif /// @@ -70,17 +70,17 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsCodeObject? value) /// /// /// - public static implicit operator ToolCallsItem(global::G.RunStepDetailsToolCallsFileSearchObject value) => new ToolCallsItem(value); + public static implicit operator ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject value) => new ToolCallsItem(value); /// /// /// - public static implicit operator global::G.RunStepDetailsToolCallsFileSearchObject?(ToolCallsItem @this) => @this.FileSearch; + public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject?(ToolCallsItem @this) => @this.FileSearch; /// /// /// - public ToolCallsItem(global::G.RunStepDetailsToolCallsFileSearchObject? value) + public ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? value) { FileSearch = value; } @@ -89,9 +89,9 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsFileSearchObject? 
value) /// /// #if NET6_0_OR_GREATER - public global::G.RunStepDetailsToolCallsFunctionObject? Function { get; init; } + public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Function { get; init; } #else - public global::G.RunStepDetailsToolCallsFunctionObject? Function { get; } + public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Function { get; } #endif /// @@ -105,17 +105,17 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsFileSearchObject? value) /// /// /// - public static implicit operator ToolCallsItem(global::G.RunStepDetailsToolCallsFunctionObject value) => new ToolCallsItem(value); + public static implicit operator ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject value) => new ToolCallsItem(value); /// /// /// - public static implicit operator global::G.RunStepDetailsToolCallsFunctionObject?(ToolCallsItem @this) => @this.Function; + public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsFunctionObject?(ToolCallsItem @this) => @this.Function; /// /// /// - public ToolCallsItem(global::G.RunStepDetailsToolCallsFunctionObject? value) + public ToolCallsItem(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? value) { Function = value; } @@ -124,10 +124,10 @@ public ToolCallsItem(global::G.RunStepDetailsToolCallsFunctionObject? value) /// /// public ToolCallsItem( - global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? type, - global::G.RunStepDetailsToolCallsCodeObject? codeInterpreter, - global::G.RunStepDetailsToolCallsFileSearchObject? fileSearch, - global::G.RunStepDetailsToolCallsFunctionObject? function + global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? type, + global::G.RunStepDeltaStepDetailsToolCallsCodeObject? codeInterpreter, + global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? fileSearch, + global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? function ) { Type = type; @@ -158,9 +158,9 @@ public bool Validate() /// ///
public TResult? Match<TResult>(
- global::System.Func<global::G.RunStepDetailsToolCallsCodeObject, TResult>? codeInterpreter = null,
- global::System.Func<global::G.RunStepDetailsToolCallsFileSearchObject, TResult>? fileSearch = null,
- global::System.Func<global::G.RunStepDetailsToolCallsFunctionObject, TResult>? function = null,
+ global::System.Func<global::G.RunStepDeltaStepDetailsToolCallsCodeObject, TResult>? codeInterpreter = null,
+ global::System.Func<global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject, TResult>? fileSearch = null,
+ global::System.Func<global::G.RunStepDeltaStepDetailsToolCallsFunctionObject, TResult>? function = null,
 bool validate = true)
 {
 if (validate)
@@ -188,9 +188,9 @@ public bool Validate()
 ///
 ///
public void Match( - global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, - global::System.Action? function = null, + global::System.Action? codeInterpreter = null, + global::System.Action? fileSearch = null, + global::System.Action? function = null, bool validate = true) { if (validate) @@ -220,11 +220,11 @@ public override int GetHashCode() var fields = new object?[] { CodeInterpreter, - typeof(global::G.RunStepDetailsToolCallsCodeObject), + typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject), FileSearch, - typeof(global::G.RunStepDetailsToolCallsFileSearchObject), + typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject), Function, - typeof(global::G.RunStepDetailsToolCallsFunctionObject), + typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -241,9 +241,9 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ToolCallsItem other) { return - global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) + global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolCallsItem2.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolCallsItem2.g.verified.cs index 1fd0486eb4..e676762742 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolCallsItem2.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolCallsItem2.g.verified.cs @@ -13,15 +13,15 @@ namespace G /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? Type { get; } + public global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? Type { get; } /// /// Details of the Code Interpreter tool call the run step was involved in. /// #if NET6_0_OR_GREATER - public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? CodeInterpreter { get; init; } + public global::G.RunStepDetailsToolCallsCodeObject? CodeInterpreter { get; init; } #else - public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? CodeInterpreter { get; } + public global::G.RunStepDetailsToolCallsCodeObject? CodeInterpreter { get; } #endif /// @@ -35,17 +35,17 @@ namespace G /// /// /// - public static implicit operator ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsCodeObject value) => new ToolCallsItem2(value); + public static implicit operator ToolCallsItem2(global::G.RunStepDetailsToolCallsCodeObject value) => new ToolCallsItem2(value); /// /// /// - public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsCodeObject?(ToolCallsItem2 @this) => @this.CodeInterpreter; + public static implicit operator global::G.RunStepDetailsToolCallsCodeObject?(ToolCallsItem2 @this) => @this.CodeInterpreter; /// /// /// - public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsCodeObject? 
value) + public ToolCallsItem2(global::G.RunStepDetailsToolCallsCodeObject? value) { CodeInterpreter = value; } @@ -54,9 +54,9 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsCodeObject? valu /// /// #if NET6_0_OR_GREATER - public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? FileSearch { get; init; } + public global::G.RunStepDetailsToolCallsFileSearchObject? FileSearch { get; init; } #else - public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? FileSearch { get; } + public global::G.RunStepDetailsToolCallsFileSearchObject? FileSearch { get; } #endif /// @@ -70,17 +70,17 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsCodeObject? valu /// /// /// - public static implicit operator ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject value) => new ToolCallsItem2(value); + public static implicit operator ToolCallsItem2(global::G.RunStepDetailsToolCallsFileSearchObject value) => new ToolCallsItem2(value); /// /// /// - public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject?(ToolCallsItem2 @this) => @this.FileSearch; + public static implicit operator global::G.RunStepDetailsToolCallsFileSearchObject?(ToolCallsItem2 @this) => @this.FileSearch; /// /// /// - public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? value) + public ToolCallsItem2(global::G.RunStepDetailsToolCallsFileSearchObject? value) { FileSearch = value; } @@ -89,9 +89,9 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject /// /// #if NET6_0_OR_GREATER - public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Function { get; init; } + public global::G.RunStepDetailsToolCallsFunctionObject? Function { get; init; } #else - public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Function { get; } + public global::G.RunStepDetailsToolCallsFunctionObject? Function { get; } #endif /// @@ -105,17 +105,17 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject /// /// /// - public static implicit operator ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject value) => new ToolCallsItem2(value); + public static implicit operator ToolCallsItem2(global::G.RunStepDetailsToolCallsFunctionObject value) => new ToolCallsItem2(value); /// /// /// - public static implicit operator global::G.RunStepDeltaStepDetailsToolCallsFunctionObject?(ToolCallsItem2 @this) => @this.Function; + public static implicit operator global::G.RunStepDetailsToolCallsFunctionObject?(ToolCallsItem2 @this) => @this.Function; /// /// /// - public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? value) + public ToolCallsItem2(global::G.RunStepDetailsToolCallsFunctionObject? value) { Function = value; } @@ -124,10 +124,10 @@ public ToolCallsItem2(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? /// /// public ToolCallsItem2( - global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? type, - global::G.RunStepDeltaStepDetailsToolCallsCodeObject? codeInterpreter, - global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? fileSearch, - global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? function + global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? type, + global::G.RunStepDetailsToolCallsCodeObject? codeInterpreter, + global::G.RunStepDetailsToolCallsFileSearchObject? fileSearch, + global::G.RunStepDetailsToolCallsFunctionObject? 
function ) { Type = type; @@ -158,9 +158,9 @@ public bool Validate() /// ///
public TResult? Match<TResult>(
- global::System.Func<global::G.RunStepDeltaStepDetailsToolCallsCodeObject, TResult>? codeInterpreter = null,
- global::System.Func<global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject, TResult>? fileSearch = null,
- global::System.Func<global::G.RunStepDeltaStepDetailsToolCallsFunctionObject, TResult>? function = null,
+ global::System.Func<global::G.RunStepDetailsToolCallsCodeObject, TResult>? codeInterpreter = null,
+ global::System.Func<global::G.RunStepDetailsToolCallsFileSearchObject, TResult>? fileSearch = null,
+ global::System.Func<global::G.RunStepDetailsToolCallsFunctionObject, TResult>? function = null,
 bool validate = true)
 {
 if (validate)
@@ -188,9 +188,9 @@ public bool Validate()
 ///
 ///
public void Match( - global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, - global::System.Action? function = null, + global::System.Action? codeInterpreter = null, + global::System.Action? fileSearch = null, + global::System.Action? function = null, bool validate = true) { if (validate) @@ -220,11 +220,11 @@ public override int GetHashCode() var fields = new object?[] { CodeInterpreter, - typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject), + typeof(global::G.RunStepDetailsToolCallsCodeObject), FileSearch, - typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject), + typeof(global::G.RunStepDetailsToolCallsFileSearchObject), Function, - typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject), + typeof(global::G.RunStepDetailsToolCallsFunctionObject), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -241,9 +241,9 @@ static int HashCodeAggregator(int hashCode, object? value) => value == null public bool Equals(ToolCallsItem2 other) { return - global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) + global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem3.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem3.g.verified.cs index 7eb1a57846..62ca0eb1fe 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem3.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem3.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.ModifyAssistantRequestToolDiscriminatorType? Type { get; } + public global::G.CreateMessageRequestAttachmentToolDiscriminatorType? Type { get; } /// /// @@ -54,9 +54,9 @@ public ToolsItem3(global::G.AssistantToolsCode? value) /// /// #if NET6_0_OR_GREATER - public global::G.AssistantToolsFileSearch? FileSearch { get; init; } + public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; init; } #else - public global::G.AssistantToolsFileSearch? FileSearch { get; } + public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; } #endif /// @@ -70,78 +70,40 @@ public ToolsItem3(global::G.AssistantToolsCode? value) /// /// /// - public static implicit operator ToolsItem3(global::G.AssistantToolsFileSearch value) => new ToolsItem3(value); + public static implicit operator ToolsItem3(global::G.AssistantToolsFileSearchTypeOnly value) => new ToolsItem3(value); /// /// /// - public static implicit operator global::G.AssistantToolsFileSearch?(ToolsItem3 @this) => @this.FileSearch; + public static implicit operator global::G.AssistantToolsFileSearchTypeOnly?(ToolsItem3 @this) => @this.FileSearch; /// /// /// - public ToolsItem3(global::G.AssistantToolsFileSearch? value) + public ToolsItem3(global::G.AssistantToolsFileSearchTypeOnly? 
value) { FileSearch = value; } - /// - /// - /// -#if NET6_0_OR_GREATER - public global::G.AssistantToolsFunction? Function { get; init; } -#else - public global::G.AssistantToolsFunction? Function { get; } -#endif - - /// - /// - /// -#if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Function))] -#endif - public bool IsFunction => Function != null; - - /// - /// - /// - public static implicit operator ToolsItem3(global::G.AssistantToolsFunction value) => new ToolsItem3(value); - - /// - /// - /// - public static implicit operator global::G.AssistantToolsFunction?(ToolsItem3 @this) => @this.Function; - - /// - /// - /// - public ToolsItem3(global::G.AssistantToolsFunction? value) - { - Function = value; - } - /// /// /// public ToolsItem3( - global::G.ModifyAssistantRequestToolDiscriminatorType? type, + global::G.CreateMessageRequestAttachmentToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, - global::G.AssistantToolsFileSearch? fileSearch, - global::G.AssistantToolsFunction? function + global::G.AssistantToolsFileSearchTypeOnly? fileSearch ) { Type = type; CodeInterpreter = codeInterpreter; FileSearch = fileSearch; - Function = function; } /// /// /// public object? Object => - Function as object ?? FileSearch as object ?? CodeInterpreter as object ; @@ -151,7 +113,7 @@ CodeInterpreter as object /// public bool Validate() { - return IsCodeInterpreter && !IsFileSearch && !IsFunction || !IsCodeInterpreter && IsFileSearch && !IsFunction || !IsCodeInterpreter && !IsFileSearch && IsFunction; + return IsCodeInterpreter && !IsFileSearch || !IsCodeInterpreter && IsFileSearch; } /// @@ -159,8 +121,7 @@ public bool Validate() /// public TResult? Match( global::System.Func? codeInterpreter = null, - global::System.Func? fileSearch = null, - global::System.Func? function = null, + global::System.Func? fileSearch = null, bool validate = true) { if (validate) @@ -176,10 +137,6 @@ public bool Validate() { return fileSearch(FileSearch!); } - else if (IsFunction && function != null) - { - return function(Function!); - } return default(TResult); } @@ -189,8 +146,7 @@ public bool Validate() /// public void Match( global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, - global::System.Action? function = null, + global::System.Action? 
fileSearch = null, bool validate = true) { if (validate) @@ -206,10 +162,6 @@ public void Match( { fileSearch?.Invoke(FileSearch!); } - else if (IsFunction) - { - function?.Invoke(Function!); - } } /// @@ -222,9 +174,7 @@ public override int GetHashCode() CodeInterpreter, typeof(global::G.AssistantToolsCode), FileSearch, - typeof(global::G.AssistantToolsFileSearch), - Function, - typeof(global::G.AssistantToolsFunction), + typeof(global::G.AssistantToolsFileSearchTypeOnly), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -242,8 +192,7 @@ public bool Equals(ToolsItem3 other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem4.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem4.g.verified.cs index d58b82cb8d..a57a960987 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem4.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem4.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.RunObjectToolDiscriminatorType? Type { get; } + public global::G.CreateRunRequestToolDiscriminatorType? Type { get; } /// /// @@ -124,7 +124,7 @@ public ToolsItem4(global::G.AssistantToolsFunction? value) /// /// public ToolsItem4( - global::G.RunObjectToolDiscriminatorType? type, + global::G.CreateRunRequestToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, global::G.AssistantToolsFileSearch? fileSearch, global::G.AssistantToolsFunction? function diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem5.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem5.g.verified.cs index 334f43b10f..18d3b76c38 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem5.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem5.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.CreateRunRequestToolDiscriminatorType? Type { get; } + public global::G.CreateThreadAndRunRequestToolDiscriminatorType? Type { get; } /// /// @@ -124,7 +124,7 @@ public ToolsItem5(global::G.AssistantToolsFunction? value) /// /// public ToolsItem5( - global::G.CreateRunRequestToolDiscriminatorType? type, + global::G.CreateThreadAndRunRequestToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, global::G.AssistantToolsFileSearch? fileSearch, global::G.AssistantToolsFunction? 
function diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem6.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem6.g.verified.cs index 8dc953639a..37d293f114 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem6.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem6.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.CreateThreadAndRunRequestToolDiscriminatorType? Type { get; } + public global::G.MessageObjectAttachmentToolDiscriminatorType? Type { get; } /// /// @@ -54,9 +54,9 @@ public ToolsItem6(global::G.AssistantToolsCode? value) /// /// #if NET6_0_OR_GREATER - public global::G.AssistantToolsFileSearch? FileSearch { get; init; } + public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; init; } #else - public global::G.AssistantToolsFileSearch? FileSearch { get; } + public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; } #endif /// @@ -70,78 +70,40 @@ public ToolsItem6(global::G.AssistantToolsCode? value) /// /// /// - public static implicit operator ToolsItem6(global::G.AssistantToolsFileSearch value) => new ToolsItem6(value); + public static implicit operator ToolsItem6(global::G.AssistantToolsFileSearchTypeOnly value) => new ToolsItem6(value); /// /// /// - public static implicit operator global::G.AssistantToolsFileSearch?(ToolsItem6 @this) => @this.FileSearch; + public static implicit operator global::G.AssistantToolsFileSearchTypeOnly?(ToolsItem6 @this) => @this.FileSearch; /// /// /// - public ToolsItem6(global::G.AssistantToolsFileSearch? value) + public ToolsItem6(global::G.AssistantToolsFileSearchTypeOnly? value) { FileSearch = value; } - /// - /// - /// -#if NET6_0_OR_GREATER - public global::G.AssistantToolsFunction? Function { get; init; } -#else - public global::G.AssistantToolsFunction? Function { get; } -#endif - - /// - /// - /// -#if NET6_0_OR_GREATER - [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Function))] -#endif - public bool IsFunction => Function != null; - - /// - /// - /// - public static implicit operator ToolsItem6(global::G.AssistantToolsFunction value) => new ToolsItem6(value); - - /// - /// - /// - public static implicit operator global::G.AssistantToolsFunction?(ToolsItem6 @this) => @this.Function; - - /// - /// - /// - public ToolsItem6(global::G.AssistantToolsFunction? value) - { - Function = value; - } - /// /// /// public ToolsItem6( - global::G.CreateThreadAndRunRequestToolDiscriminatorType? type, + global::G.MessageObjectAttachmentToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, - global::G.AssistantToolsFileSearch? fileSearch, - global::G.AssistantToolsFunction? function + global::G.AssistantToolsFileSearchTypeOnly? fileSearch ) { Type = type; CodeInterpreter = codeInterpreter; FileSearch = fileSearch; - Function = function; } /// /// /// public object? Object => - Function as object ?? FileSearch as object ?? CodeInterpreter as object ; @@ -151,7 +113,7 @@ CodeInterpreter as object /// public bool Validate() { - return IsCodeInterpreter && !IsFileSearch && !IsFunction || !IsCodeInterpreter && IsFileSearch && !IsFunction || !IsCodeInterpreter && !IsFileSearch && IsFunction; + return IsCodeInterpreter && !IsFileSearch || !IsCodeInterpreter && IsFileSearch; } /// @@ -159,8 +121,7 @@ public bool Validate() /// public TResult? Match( global::System.Func? 
codeInterpreter = null, - global::System.Func? fileSearch = null, - global::System.Func? function = null, + global::System.Func? fileSearch = null, bool validate = true) { if (validate) @@ -176,10 +137,6 @@ public bool Validate() { return fileSearch(FileSearch!); } - else if (IsFunction && function != null) - { - return function(Function!); - } return default(TResult); } @@ -189,8 +146,7 @@ public bool Validate() /// public void Match( global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, - global::System.Action? function = null, + global::System.Action? fileSearch = null, bool validate = true) { if (validate) @@ -206,10 +162,6 @@ public void Match( { fileSearch?.Invoke(FileSearch!); } - else if (IsFunction) - { - function?.Invoke(Function!); - } } /// @@ -222,9 +174,7 @@ public override int GetHashCode() CodeInterpreter, typeof(global::G.AssistantToolsCode), FileSearch, - typeof(global::G.AssistantToolsFileSearch), - Function, - typeof(global::G.AssistantToolsFunction), + typeof(global::G.AssistantToolsFileSearchTypeOnly), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -242,8 +192,7 @@ public bool Equals(ToolsItem6 other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem7.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem7.g.verified.cs index 5d080f6db5..4bb5b7c86e 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem7.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem7.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.MessageObjectAttachmentToolDiscriminatorType? Type { get; } + public global::G.ModifyAssistantRequestToolDiscriminatorType? Type { get; } /// /// @@ -54,9 +54,9 @@ public ToolsItem7(global::G.AssistantToolsCode? value) /// /// #if NET6_0_OR_GREATER - public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; init; } + public global::G.AssistantToolsFileSearch? FileSearch { get; init; } #else - public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; } + public global::G.AssistantToolsFileSearch? FileSearch { get; } #endif /// @@ -70,40 +70,78 @@ public ToolsItem7(global::G.AssistantToolsCode? value) /// /// /// - public static implicit operator ToolsItem7(global::G.AssistantToolsFileSearchTypeOnly value) => new ToolsItem7(value); + public static implicit operator ToolsItem7(global::G.AssistantToolsFileSearch value) => new ToolsItem7(value); /// /// /// - public static implicit operator global::G.AssistantToolsFileSearchTypeOnly?(ToolsItem7 @this) => @this.FileSearch; + public static implicit operator global::G.AssistantToolsFileSearch?(ToolsItem7 @this) => @this.FileSearch; /// /// /// - public ToolsItem7(global::G.AssistantToolsFileSearchTypeOnly? value) + public ToolsItem7(global::G.AssistantToolsFileSearch? value) { FileSearch = value; } + /// + /// + /// +#if NET6_0_OR_GREATER + public global::G.AssistantToolsFunction? 
Function { get; init; } +#else + public global::G.AssistantToolsFunction? Function { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Function))] +#endif + public bool IsFunction => Function != null; + + /// + /// + /// + public static implicit operator ToolsItem7(global::G.AssistantToolsFunction value) => new ToolsItem7(value); + + /// + /// + /// + public static implicit operator global::G.AssistantToolsFunction?(ToolsItem7 @this) => @this.Function; + + /// + /// + /// + public ToolsItem7(global::G.AssistantToolsFunction? value) + { + Function = value; + } + /// /// /// public ToolsItem7( - global::G.MessageObjectAttachmentToolDiscriminatorType? type, + global::G.ModifyAssistantRequestToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, - global::G.AssistantToolsFileSearchTypeOnly? fileSearch + global::G.AssistantToolsFileSearch? fileSearch, + global::G.AssistantToolsFunction? function ) { Type = type; CodeInterpreter = codeInterpreter; FileSearch = fileSearch; + Function = function; } /// /// /// public object? Object => + Function as object ?? FileSearch as object ?? CodeInterpreter as object ; @@ -113,7 +151,7 @@ CodeInterpreter as object /// public bool Validate() { - return IsCodeInterpreter && !IsFileSearch || !IsCodeInterpreter && IsFileSearch; + return IsCodeInterpreter && !IsFileSearch && !IsFunction || !IsCodeInterpreter && IsFileSearch && !IsFunction || !IsCodeInterpreter && !IsFileSearch && IsFunction; } /// @@ -121,7 +159,8 @@ public bool Validate() /// public TResult? Match( global::System.Func? codeInterpreter = null, - global::System.Func? fileSearch = null, + global::System.Func? fileSearch = null, + global::System.Func? function = null, bool validate = true) { if (validate) @@ -137,6 +176,10 @@ public bool Validate() { return fileSearch(FileSearch!); } + else if (IsFunction && function != null) + { + return function(Function!); + } return default(TResult); } @@ -146,7 +189,8 @@ public bool Validate() /// public void Match( global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, + global::System.Action? fileSearch = null, + global::System.Action? 
function = null, bool validate = true) { if (validate) @@ -162,6 +206,10 @@ public void Match( { fileSearch?.Invoke(FileSearch!); } + else if (IsFunction) + { + function?.Invoke(Function!); + } } /// @@ -174,7 +222,9 @@ public override int GetHashCode() CodeInterpreter, typeof(global::G.AssistantToolsCode), FileSearch, - typeof(global::G.AssistantToolsFileSearchTypeOnly), + typeof(global::G.AssistantToolsFileSearch), + Function, + typeof(global::G.AssistantToolsFunction), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -192,7 +242,8 @@ public bool Equals(ToolsItem7 other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem8.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem8.g.verified.cs index be3be8b483..d5a50b6d44 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem8.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ToolsItem8.g.verified.cs @@ -13,7 +13,7 @@ namespace G /// /// /// - public global::G.CreateMessageRequestAttachmentToolDiscriminatorType? Type { get; } + public global::G.RunObjectToolDiscriminatorType? Type { get; } /// /// @@ -54,9 +54,9 @@ public ToolsItem8(global::G.AssistantToolsCode? value) /// /// #if NET6_0_OR_GREATER - public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; init; } + public global::G.AssistantToolsFileSearch? FileSearch { get; init; } #else - public global::G.AssistantToolsFileSearchTypeOnly? FileSearch { get; } + public global::G.AssistantToolsFileSearch? FileSearch { get; } #endif /// @@ -70,40 +70,78 @@ public ToolsItem8(global::G.AssistantToolsCode? value) /// /// /// - public static implicit operator ToolsItem8(global::G.AssistantToolsFileSearchTypeOnly value) => new ToolsItem8(value); + public static implicit operator ToolsItem8(global::G.AssistantToolsFileSearch value) => new ToolsItem8(value); /// /// /// - public static implicit operator global::G.AssistantToolsFileSearchTypeOnly?(ToolsItem8 @this) => @this.FileSearch; + public static implicit operator global::G.AssistantToolsFileSearch?(ToolsItem8 @this) => @this.FileSearch; /// /// /// - public ToolsItem8(global::G.AssistantToolsFileSearchTypeOnly? value) + public ToolsItem8(global::G.AssistantToolsFileSearch? value) { FileSearch = value; } + /// + /// + /// +#if NET6_0_OR_GREATER + public global::G.AssistantToolsFunction? Function { get; init; } +#else + public global::G.AssistantToolsFunction? Function { get; } +#endif + + /// + /// + /// +#if NET6_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Function))] +#endif + public bool IsFunction => Function != null; + + /// + /// + /// + public static implicit operator ToolsItem8(global::G.AssistantToolsFunction value) => new ToolsItem8(value); + + /// + /// + /// + public static implicit operator global::G.AssistantToolsFunction?(ToolsItem8 @this) => @this.Function; + + /// + /// + /// + public ToolsItem8(global::G.AssistantToolsFunction? 
value) + { + Function = value; + } + /// /// /// public ToolsItem8( - global::G.CreateMessageRequestAttachmentToolDiscriminatorType? type, + global::G.RunObjectToolDiscriminatorType? type, global::G.AssistantToolsCode? codeInterpreter, - global::G.AssistantToolsFileSearchTypeOnly? fileSearch + global::G.AssistantToolsFileSearch? fileSearch, + global::G.AssistantToolsFunction? function ) { Type = type; CodeInterpreter = codeInterpreter; FileSearch = fileSearch; + Function = function; } /// /// /// public object? Object => + Function as object ?? FileSearch as object ?? CodeInterpreter as object ; @@ -113,7 +151,7 @@ CodeInterpreter as object /// public bool Validate() { - return IsCodeInterpreter && !IsFileSearch || !IsCodeInterpreter && IsFileSearch; + return IsCodeInterpreter && !IsFileSearch && !IsFunction || !IsCodeInterpreter && IsFileSearch && !IsFunction || !IsCodeInterpreter && !IsFileSearch && IsFunction; } /// @@ -121,7 +159,8 @@ public bool Validate() /// public TResult? Match( global::System.Func? codeInterpreter = null, - global::System.Func? fileSearch = null, + global::System.Func? fileSearch = null, + global::System.Func? function = null, bool validate = true) { if (validate) @@ -137,6 +176,10 @@ public bool Validate() { return fileSearch(FileSearch!); } + else if (IsFunction && function != null) + { + return function(Function!); + } return default(TResult); } @@ -146,7 +189,8 @@ public bool Validate() /// public void Match( global::System.Action? codeInterpreter = null, - global::System.Action? fileSearch = null, + global::System.Action? fileSearch = null, + global::System.Action? function = null, bool validate = true) { if (validate) @@ -162,6 +206,10 @@ public void Match( { fileSearch?.Invoke(FileSearch!); } + else if (IsFunction) + { + function?.Invoke(Function!); + } } /// @@ -174,7 +222,9 @@ public override int GetHashCode() CodeInterpreter, typeof(global::G.AssistantToolsCode), FileSearch, - typeof(global::G.AssistantToolsFileSearchTypeOnly), + typeof(global::G.AssistantToolsFileSearch), + Function, + typeof(global::G.AssistantToolsFunction), }; const int offset = unchecked((int)2166136261); const int prime = 16777619; @@ -192,7 +242,8 @@ public bool Equals(ToolsItem8 other) { return global::System.Collections.Generic.EqualityComparer.Default.Equals(CodeInterpreter, other.CodeInterpreter) && - global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) + global::System.Collections.Generic.EqualityComparer.Default.Equals(FileSearch, other.FileSearch) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Function, other.Function) ; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UpdateVectorStoreRequest.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UpdateVectorStoreRequest.g.verified.cs index ec4488466d..b24819c2f3 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UpdateVectorStoreRequest.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UpdateVectorStoreRequest.g.verified.cs @@ -22,7 +22,7 @@ public sealed partial class UpdateVectorStoreRequest public global::G.VectorStoreExpirationAfter? ExpiresAfter { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] public object? Metadata { get; set; } @@ -43,7 +43,7 @@ public sealed partial class UpdateVectorStoreRequest /// The expiration policy for a vector store. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public UpdateVectorStoreRequest( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UpdateVectorStoreRequestMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UpdateVectorStoreRequestMetadata.g.verified.cs index 149d695f13..38da94577a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UpdateVectorStoreRequestMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UpdateVectorStoreRequestMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// public sealed partial class UpdateVectorStoreRequestMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesBucketWidth.g.verified.cs new file mode 100644 index 0000000000..3c967d8d9a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesBucketWidth.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageAudioSpeechesBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + public enum UsageAudioSpeechesBucketWidth + { + /// + /// + /// + x1m, + /// + /// + /// + x1h, + /// + /// + /// + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioSpeechesBucketWidthExtensions + { + /// + /// Converts an enum to a string. 
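Editorial illustration (not part of the patch): the ToolsItem6–ToolsItem8 unions regenerated earlier in this diff gain or lose a Function case, which changes how consumers dispatch. A minimal sketch follows, assuming the snapshot compiles as shipped; the Describe helper and its labels are hypothetical.

// Hypothetical consumer helper: dispatch over the three variants of the regenerated
// ToolsItem7 union via the generated Match overload (Match returns default when no case matches).
static string Describe(G.ToolsItem7 tool) =>
    tool.Match(
        codeInterpreter: _ => "code_interpreter",
        fileSearch: _ => "file_search",
        function: _ => "function") ?? "empty";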
+ /// + public static string ToValueString(this UsageAudioSpeechesBucketWidth value) + { + return value switch + { + UsageAudioSpeechesBucketWidth.x1m => "1m", + UsageAudioSpeechesBucketWidth.x1h => "1h", + UsageAudioSpeechesBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioSpeechesBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageAudioSpeechesBucketWidth.x1m, + "1h" => UsageAudioSpeechesBucketWidth.x1h, + "1d" => UsageAudioSpeechesBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesGroupByItem.g.verified.cs new file mode 100644 index 0000000000..e9267e4920 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesGroupByItem.g.verified.cs @@ -0,0 +1,64 @@ +//HintName: G.Models.UsageAudioSpeechesGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageAudioSpeechesGroupByItem + { + /// + /// + /// + ProjectId, + /// + /// + /// + UserId, + /// + /// + /// + ApiKeyId, + /// + /// + /// + Model, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioSpeechesGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioSpeechesGroupByItem value) + { + return value switch + { + UsageAudioSpeechesGroupByItem.ProjectId => "project_id", + UsageAudioSpeechesGroupByItem.UserId => "user_id", + UsageAudioSpeechesGroupByItem.ApiKeyId => "api_key_id", + UsageAudioSpeechesGroupByItem.Model => "model", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioSpeechesGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageAudioSpeechesGroupByItem.ProjectId, + "user_id" => UsageAudioSpeechesGroupByItem.UserId, + "api_key_id" => UsageAudioSpeechesGroupByItem.ApiKeyId, + "model" => UsageAudioSpeechesGroupByItem.Model, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResult.Json.g.verified.cs new file mode 100644 index 0000000000..4ffdeb75d9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageAudioSpeechesResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageAudioSpeechesResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. 
+ /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageAudioSpeechesResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageAudioSpeechesResult), + jsonSerializerContext) as global::G.UsageAudioSpeechesResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageAudioSpeechesResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageAudioSpeechesResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageAudioSpeechesResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResult.g.verified.cs new file mode 100644 index 0000000000..9ca4bd4586 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResult.g.verified.cs @@ -0,0 +1,111 @@ +//HintName: G.Models.UsageAudioSpeechesResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated audio speeches usage details of the specific time bucket. + /// + public sealed partial class UsageAudioSpeechesResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageAudioSpeechesResultObjectJsonConverter))] + public global::G.UsageAudioSpeechesResultObject Object { get; set; } + + /// + /// The number of characters processed. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("characters")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int Characters { get; set; } + + /// + /// The count of requests made to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("num_model_requests")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int NumModelRequests { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of characters processed. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. 
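Editorial illustration (not part of the patch): a minimal round trip through the generated JSON helpers on the new UsageAudioSpeechesResult model, assuming the snapshot compiles as shipped; all property values below are hypothetical.

// Characters and NumModelRequests are `required`, so an object initializer must set them.
var speeches = new G.UsageAudioSpeechesResult
{
    Object = G.UsageAudioSpeechesResultObject.OrganizationUsageAudioSpeechesResult,
    Characters = 1200,            // hypothetical value
    NumModelRequests = 3,         // hypothetical value
    ProjectId = "proj_example",   // hypothetical value
};

string json = speeches.ToJson();                                        // reflection-based overload
G.UsageAudioSpeechesResult? parsed = G.UsageAudioSpeechesResult.FromJson(json);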
+ /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageAudioSpeechesResult( + int characters, + int numModelRequests, + global::G.UsageAudioSpeechesResultObject @object, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.Characters = characters; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageAudioSpeechesResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResultObject.g.verified.cs new file mode 100644 index 0000000000..b309e7eacb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioSpeechesResultObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageAudioSpeechesResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageAudioSpeechesResultObject + { + /// + /// + /// + OrganizationUsageAudioSpeechesResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioSpeechesResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioSpeechesResultObject value) + { + return value switch + { + UsageAudioSpeechesResultObject.OrganizationUsageAudioSpeechesResult => "organization.usage.audio_speeches.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioSpeechesResultObject? ToEnum(string value) + { + return value switch + { + "organization.usage.audio_speeches.result" => UsageAudioSpeechesResultObject.OrganizationUsageAudioSpeechesResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..e9d690757f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsBucketWidth.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageAudioTranscriptionsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + public enum UsageAudioTranscriptionsBucketWidth + { + /// + /// + /// + x1m, + /// + /// + /// + x1h, + /// + /// + /// + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioTranscriptionsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioTranscriptionsBucketWidth value) + { + return value switch + { + UsageAudioTranscriptionsBucketWidth.x1m => "1m", + UsageAudioTranscriptionsBucketWidth.x1h => "1h", + UsageAudioTranscriptionsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. 
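Editorial illustration (not part of the patch): the generated *BucketWidth enums all share the shape shown above; a small sketch of the string round trip using the audio-speeches variant, with wire values taken from the switch arms in the snapshot.

// "1d" is the documented default bucket width; unknown wire values map to null rather than throwing.
string wire = G.UsageAudioSpeechesBucketWidthExtensions
    .ToValueString(G.UsageAudioSpeechesBucketWidth.x1d);                // "1d"
G.UsageAudioSpeechesBucketWidth? hourly =
    G.UsageAudioSpeechesBucketWidthExtensions.ToEnum("1h");             // x1h
G.UsageAudioSpeechesBucketWidth? unknown =
    G.UsageAudioSpeechesBucketWidthExtensions.ToEnum("1w");             // null ("1w" is hypothetical)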
+ /// + public static UsageAudioTranscriptionsBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageAudioTranscriptionsBucketWidth.x1m, + "1h" => UsageAudioTranscriptionsBucketWidth.x1h, + "1d" => UsageAudioTranscriptionsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..7631fa1e29 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsGroupByItem.g.verified.cs @@ -0,0 +1,64 @@ +//HintName: G.Models.UsageAudioTranscriptionsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageAudioTranscriptionsGroupByItem + { + /// + /// + /// + ProjectId, + /// + /// + /// + UserId, + /// + /// + /// + ApiKeyId, + /// + /// + /// + Model, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioTranscriptionsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioTranscriptionsGroupByItem value) + { + return value switch + { + UsageAudioTranscriptionsGroupByItem.ProjectId => "project_id", + UsageAudioTranscriptionsGroupByItem.UserId => "user_id", + UsageAudioTranscriptionsGroupByItem.ApiKeyId => "api_key_id", + UsageAudioTranscriptionsGroupByItem.Model => "model", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioTranscriptionsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageAudioTranscriptionsGroupByItem.ProjectId, + "user_id" => UsageAudioTranscriptionsGroupByItem.UserId, + "api_key_id" => UsageAudioTranscriptionsGroupByItem.ApiKeyId, + "model" => UsageAudioTranscriptionsGroupByItem.Model, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResult.Json.g.verified.cs new file mode 100644 index 0000000000..75eff742e3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageAudioTranscriptionsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageAudioTranscriptionsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageAudioTranscriptionsResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageAudioTranscriptionsResult), + jsonSerializerContext) as global::G.UsageAudioTranscriptionsResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageAudioTranscriptionsResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageAudioTranscriptionsResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageAudioTranscriptionsResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResult.g.verified.cs new file mode 100644 index 0000000000..eb513d4478 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResult.g.verified.cs @@ -0,0 +1,111 @@ +//HintName: G.Models.UsageAudioTranscriptionsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated audio transcriptions usage details of the specific time bucket. + /// + public sealed partial class UsageAudioTranscriptionsResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageAudioTranscriptionsResultObjectJsonConverter))] + public global::G.UsageAudioTranscriptionsResultObject Object { get; set; } + + /// + /// The number of seconds processed. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("seconds")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int Seconds { get; set; } + + /// + /// The count of requests made to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("num_model_requests")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int NumModelRequests { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of seconds processed. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. 
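Editorial illustration (not part of the patch): the generated stream overload deserializes a usage record directly from a Stream; a sketch assuming an async caller and a hypothetical usage.json file.

// Inside an async method; uses the options-based FromJsonStreamAsync overload shown above.
await using var stream = global::System.IO.File.OpenRead("usage.json"); // hypothetical path
G.UsageAudioTranscriptionsResult? record =
    await G.UsageAudioTranscriptionsResult.FromJsonStreamAsync(stream);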
+ /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageAudioTranscriptionsResult( + int seconds, + int numModelRequests, + global::G.UsageAudioTranscriptionsResultObject @object, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.Seconds = seconds; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageAudioTranscriptionsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResultObject.g.verified.cs new file mode 100644 index 0000000000..a5e218d45e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageAudioTranscriptionsResultObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageAudioTranscriptionsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageAudioTranscriptionsResultObject + { + /// + /// + /// + OrganizationUsageAudioTranscriptionsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageAudioTranscriptionsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageAudioTranscriptionsResultObject value) + { + return value switch + { + UsageAudioTranscriptionsResultObject.OrganizationUsageAudioTranscriptionsResult => "organization.usage.audio_transcriptions.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageAudioTranscriptionsResultObject? ToEnum(string value) + { + return value switch + { + "organization.usage.audio_transcriptions.result" => UsageAudioTranscriptionsResultObject.OrganizationUsageAudioTranscriptionsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..e1bfd91aa9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + public enum UsageCodeInterpreterSessionsBucketWidth + { + /// + /// + /// + x1m, + /// + /// + /// + x1h, + /// + /// + /// + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCodeInterpreterSessionsBucketWidthExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this UsageCodeInterpreterSessionsBucketWidth value) + { + return value switch + { + UsageCodeInterpreterSessionsBucketWidth.x1m => "1m", + UsageCodeInterpreterSessionsBucketWidth.x1h => "1h", + UsageCodeInterpreterSessionsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCodeInterpreterSessionsBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageCodeInterpreterSessionsBucketWidth.x1m, + "1h" => UsageCodeInterpreterSessionsBucketWidth.x1h, + "1d" => UsageCodeInterpreterSessionsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..7a31c9b9dd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageCodeInterpreterSessionsGroupByItem + { + /// + /// + /// + ProjectId, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCodeInterpreterSessionsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCodeInterpreterSessionsGroupByItem value) + { + return value switch + { + UsageCodeInterpreterSessionsGroupByItem.ProjectId => "project_id", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCodeInterpreterSessionsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageCodeInterpreterSessionsGroupByItem.ProjectId, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResult.Json.g.verified.cs new file mode 100644 index 0000000000..a12fe13d92 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageCodeInterpreterSessionsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageCodeInterpreterSessionsResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageCodeInterpreterSessionsResult), + jsonSerializerContext) as global::G.UsageCodeInterpreterSessionsResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageCodeInterpreterSessionsResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageCodeInterpreterSessionsResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageCodeInterpreterSessionsResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResult.g.verified.cs new file mode 100644 index 0000000000..99c9968425 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResult.g.verified.cs @@ -0,0 +1,66 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated code interpreter sessions usage details of the specific time bucket. + /// + public sealed partial class UsageCodeInterpreterSessionsResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageCodeInterpreterSessionsResultObjectJsonConverter))] + public global::G.UsageCodeInterpreterSessionsResultObject Object { get; set; } + + /// + /// The number of code interpreter sessions. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("sessions")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int Sessions { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of code interpreter sessions. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageCodeInterpreterSessionsResult( + int sessions, + global::G.UsageCodeInterpreterSessionsResultObject @object, + string? projectId) + { + this.Sessions = sessions; + this.Object = @object; + this.ProjectId = projectId; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageCodeInterpreterSessionsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResultObject.g.verified.cs new file mode 100644 index 0000000000..268ca4f412 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCodeInterpreterSessionsResultObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageCodeInterpreterSessionsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageCodeInterpreterSessionsResultObject + { + /// + /// + /// + OrganizationUsageCodeInterpreterSessionsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. 
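Editorial illustration (not part of the patch): the [SetsRequiredMembers] constructor generated for UsageCodeInterpreterSessionsResult lets callers supply the required members positionally instead of through an object initializer; the values below are hypothetical.

var bucketResult = new G.UsageCodeInterpreterSessionsResult(
    sessions: 5,                                                                  // hypothetical count
    @object: G.UsageCodeInterpreterSessionsResultObject.OrganizationUsageCodeInterpreterSessionsResult,
    projectId: null);                                                             // no project grouping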
+ /// + public static class UsageCodeInterpreterSessionsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCodeInterpreterSessionsResultObject value) + { + return value switch + { + UsageCodeInterpreterSessionsResultObject.OrganizationUsageCodeInterpreterSessionsResult => "organization.usage.code_interpreter_sessions.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCodeInterpreterSessionsResultObject? ToEnum(string value) + { + return value switch + { + "organization.usage.code_interpreter_sessions.result" => UsageCodeInterpreterSessionsResultObject.OrganizationUsageCodeInterpreterSessionsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..5e1a491054 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsBucketWidth.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageCompletionsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + public enum UsageCompletionsBucketWidth + { + /// + /// + /// + x1m, + /// + /// + /// + x1h, + /// + /// + /// + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCompletionsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCompletionsBucketWidth value) + { + return value switch + { + UsageCompletionsBucketWidth.x1m => "1m", + UsageCompletionsBucketWidth.x1h => "1h", + UsageCompletionsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCompletionsBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageCompletionsBucketWidth.x1m, + "1h" => UsageCompletionsBucketWidth.x1h, + "1d" => UsageCompletionsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..d8338ac4f5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsGroupByItem.g.verified.cs @@ -0,0 +1,70 @@ +//HintName: G.Models.UsageCompletionsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageCompletionsGroupByItem + { + /// + /// + /// + ProjectId, + /// + /// + /// + UserId, + /// + /// + /// + ApiKeyId, + /// + /// + /// + Model, + /// + /// + /// + Batch, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCompletionsGroupByItemExtensions + { + /// + /// Converts an enum to a string. 
+ /// + public static string ToValueString(this UsageCompletionsGroupByItem value) + { + return value switch + { + UsageCompletionsGroupByItem.ProjectId => "project_id", + UsageCompletionsGroupByItem.UserId => "user_id", + UsageCompletionsGroupByItem.ApiKeyId => "api_key_id", + UsageCompletionsGroupByItem.Model => "model", + UsageCompletionsGroupByItem.Batch => "batch", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCompletionsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageCompletionsGroupByItem.ProjectId, + "user_id" => UsageCompletionsGroupByItem.UserId, + "api_key_id" => UsageCompletionsGroupByItem.ApiKeyId, + "model" => UsageCompletionsGroupByItem.Model, + "batch" => UsageCompletionsGroupByItem.Batch, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResult.Json.g.verified.cs new file mode 100644 index 0000000000..4f5006fd38 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageCompletionsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageCompletionsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageCompletionsResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageCompletionsResult), + jsonSerializerContext) as global::G.UsageCompletionsResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageCompletionsResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageCompletionsResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageCompletionsResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResult.g.verified.cs new file mode 100644 index 0000000000..59f38e3a93 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResult.g.verified.cs @@ -0,0 +1,145 @@ +//HintName: G.Models.UsageCompletionsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated completions usage details of the specific time bucket. + /// + public sealed partial class UsageCompletionsResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageCompletionsResultObjectJsonConverter))] + public global::G.UsageCompletionsResultObject Object { get; set; } + + /// + /// The number of input tokens used. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_tokens")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int InputTokens { get; set; } + + /// + /// The number of input tokens that has been cached from previous requests. 
+ /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_cached_tokens")] + public int? InputCachedTokens { get; set; } + + /// + /// The number of output tokens used. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("output_tokens")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int OutputTokens { get; set; } + + /// + /// The count of requests made to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("num_model_requests")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int NumModelRequests { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// When `group_by=batch`, this field tells whether the grouped usage result is batch or not. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("batch")] + public bool? Batch { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of input tokens used. + /// + /// + /// The number of input tokens that has been cached from previous requests. + /// + /// + /// The number of output tokens used. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + /// + /// When `group_by=batch`, this field tells whether the grouped usage result is batch or not. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageCompletionsResult( + int inputTokens, + int outputTokens, + int numModelRequests, + global::G.UsageCompletionsResultObject @object, + int? inputCachedTokens, + string? projectId, + string? userId, + string? apiKeyId, + string? model, + bool? 
batch)
+        {
+            this.InputTokens = inputTokens;
+            this.OutputTokens = outputTokens;
+            this.NumModelRequests = numModelRequests;
+            this.Object = @object;
+            this.InputCachedTokens = inputCachedTokens;
+            this.ProjectId = projectId;
+            this.UserId = userId;
+            this.ApiKeyId = apiKeyId;
+            this.Model = model;
+            this.Batch = batch;
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="UsageCompletionsResult" /> class.
+        /// </summary>
+        public UsageCompletionsResult()
+        {
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResultObject.g.verified.cs
new file mode 100644
index 0000000000..01abca4a47
--- /dev/null
+++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCompletionsResultObject.g.verified.cs
@@ -0,0 +1,46 @@
+//HintName: G.Models.UsageCompletionsResultObject.g.cs
+
+#nullable enable
+
+namespace G
+{
+    /// <summary>
+    /// 
+    /// </summary>
+    public enum UsageCompletionsResultObject
+    {
+        /// <summary>
+        /// 
+        /// </summary>
+        OrganizationUsageCompletionsResult,
+    }
+
+    /// <summary>
+    /// Enum extensions to do fast conversions without the reflection.
+    /// </summary>
+    public static class UsageCompletionsResultObjectExtensions
+    {
+        /// <summary>
+        /// Converts an enum to a string.
+        /// </summary>
+        public static string ToValueString(this UsageCompletionsResultObject value)
+        {
+            return value switch
+            {
+                UsageCompletionsResultObject.OrganizationUsageCompletionsResult => "organization.usage.completions.result",
+                _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null),
+            };
+        }
+        /// <summary>
+        /// Converts an string to a enum.
+        /// </summary>
+        public static UsageCompletionsResultObject? ToEnum(string value)
+        {
+            return value switch
+            {
+                "organization.usage.completions.result" => UsageCompletionsResultObject.OrganizationUsageCompletionsResult,
+                _ => null,
+            };
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCostsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCostsBucketWidth.g.verified.cs
new file mode 100644
index 0000000000..c01158955c
--- /dev/null
+++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCostsBucketWidth.g.verified.cs
@@ -0,0 +1,46 @@
+//HintName: G.Models.UsageCostsBucketWidth.g.cs
+
+#nullable enable
+
+namespace G
+{
+    /// <summary>
+    /// Default Value: 1d
+    /// </summary>
+    public enum UsageCostsBucketWidth
+    {
+        /// <summary>
+        /// 
+        /// </summary>
+        x1d,
+    }
+
+    /// <summary>
+    /// Enum extensions to do fast conversions without the reflection.
+    /// </summary>
+    public static class UsageCostsBucketWidthExtensions
+    {
+        /// <summary>
+        /// Converts an enum to a string.
+        /// </summary>
+        public static string ToValueString(this UsageCostsBucketWidth value)
+        {
+            return value switch
+            {
+                UsageCostsBucketWidth.x1d => "1d",
+                _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null),
+            };
+        }
+        /// <summary>
+        /// Converts an string to a enum.
+        /// </summary>
+        public static UsageCostsBucketWidth? 
ToEnum(string value) + { + return value switch + { + "1d" => UsageCostsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCostsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCostsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..5902e7db2d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageCostsGroupByItem.g.verified.cs @@ -0,0 +1,52 @@ +//HintName: G.Models.UsageCostsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageCostsGroupByItem + { + /// + /// + /// + ProjectId, + /// + /// + /// + LineItem, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageCostsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageCostsGroupByItem value) + { + return value switch + { + UsageCostsGroupByItem.ProjectId => "project_id", + UsageCostsGroupByItem.LineItem => "line_item", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageCostsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageCostsGroupByItem.ProjectId, + "line_item" => UsageCostsGroupByItem.LineItem, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..9e698d2526 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsBucketWidth.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageEmbeddingsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + public enum UsageEmbeddingsBucketWidth + { + /// + /// + /// + x1m, + /// + /// + /// + x1h, + /// + /// + /// + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageEmbeddingsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageEmbeddingsBucketWidth value) + { + return value switch + { + UsageEmbeddingsBucketWidth.x1m => "1m", + UsageEmbeddingsBucketWidth.x1h => "1h", + UsageEmbeddingsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageEmbeddingsBucketWidth? 
ToEnum(string value) + { + return value switch + { + "1m" => UsageEmbeddingsBucketWidth.x1m, + "1h" => UsageEmbeddingsBucketWidth.x1h, + "1d" => UsageEmbeddingsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..2927f1b2c3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsGroupByItem.g.verified.cs @@ -0,0 +1,64 @@ +//HintName: G.Models.UsageEmbeddingsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageEmbeddingsGroupByItem + { + /// + /// + /// + ProjectId, + /// + /// + /// + UserId, + /// + /// + /// + ApiKeyId, + /// + /// + /// + Model, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageEmbeddingsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageEmbeddingsGroupByItem value) + { + return value switch + { + UsageEmbeddingsGroupByItem.ProjectId => "project_id", + UsageEmbeddingsGroupByItem.UserId => "user_id", + UsageEmbeddingsGroupByItem.ApiKeyId => "api_key_id", + UsageEmbeddingsGroupByItem.Model => "model", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageEmbeddingsGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageEmbeddingsGroupByItem.ProjectId, + "user_id" => UsageEmbeddingsGroupByItem.UserId, + "api_key_id" => UsageEmbeddingsGroupByItem.ApiKeyId, + "model" => UsageEmbeddingsGroupByItem.Model, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResult.Json.g.verified.cs new file mode 100644 index 0000000000..6ad15d4978 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageEmbeddingsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageEmbeddingsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageEmbeddingsResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageEmbeddingsResult), + jsonSerializerContext) as global::G.UsageEmbeddingsResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageEmbeddingsResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageEmbeddingsResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageEmbeddingsResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResult.g.verified.cs new file mode 100644 index 0000000000..26db447a70 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResult.g.verified.cs @@ -0,0 +1,111 @@ +//HintName: G.Models.UsageEmbeddingsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated embeddings usage details of the specific time bucket. + /// + public sealed partial class UsageEmbeddingsResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageEmbeddingsResultObjectJsonConverter))] + public global::G.UsageEmbeddingsResultObject Object { get; set; } + + /// + /// The number of input tokens used. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_tokens")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int InputTokens { get; set; } + + /// + /// The count of requests made to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("num_model_requests")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int NumModelRequests { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of input tokens used. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. 
+ /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageEmbeddingsResult( + int inputTokens, + int numModelRequests, + global::G.UsageEmbeddingsResultObject @object, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.InputTokens = inputTokens; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageEmbeddingsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResultObject.g.verified.cs new file mode 100644 index 0000000000..2db89ad016 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageEmbeddingsResultObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageEmbeddingsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageEmbeddingsResultObject + { + /// + /// + /// + OrganizationUsageEmbeddingsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageEmbeddingsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageEmbeddingsResultObject value) + { + return value switch + { + UsageEmbeddingsResultObject.OrganizationUsageEmbeddingsResult => "organization.usage.embeddings.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageEmbeddingsResultObject? ToEnum(string value) + { + return value switch + { + "organization.usage.embeddings.result" => UsageEmbeddingsResultObject.OrganizationUsageEmbeddingsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesBucketWidth.g.verified.cs new file mode 100644 index 0000000000..8fddc1d707 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesBucketWidth.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageImagesBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + public enum UsageImagesBucketWidth + { + /// + /// + /// + x1m, + /// + /// + /// + x1h, + /// + /// + /// + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageImagesBucketWidth value) + { + return value switch + { + UsageImagesBucketWidth.x1m => "1m", + UsageImagesBucketWidth.x1h => "1h", + UsageImagesBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesBucketWidth? 
ToEnum(string value) + { + return value switch + { + "1m" => UsageImagesBucketWidth.x1m, + "1h" => UsageImagesBucketWidth.x1h, + "1d" => UsageImagesBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesGroupByItem.g.verified.cs new file mode 100644 index 0000000000..c0f8a96609 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesGroupByItem.g.verified.cs @@ -0,0 +1,76 @@ +//HintName: G.Models.UsageImagesGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageImagesGroupByItem + { + /// + /// + /// + ProjectId, + /// + /// + /// + UserId, + /// + /// + /// + ApiKeyId, + /// + /// + /// + Model, + /// + /// + /// + Size, + /// + /// + /// + Source, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageImagesGroupByItem value) + { + return value switch + { + UsageImagesGroupByItem.ProjectId => "project_id", + UsageImagesGroupByItem.UserId => "user_id", + UsageImagesGroupByItem.ApiKeyId => "api_key_id", + UsageImagesGroupByItem.Model => "model", + UsageImagesGroupByItem.Size => "size", + UsageImagesGroupByItem.Source => "source", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageImagesGroupByItem.ProjectId, + "user_id" => UsageImagesGroupByItem.UserId, + "api_key_id" => UsageImagesGroupByItem.ApiKeyId, + "model" => UsageImagesGroupByItem.Model, + "size" => UsageImagesGroupByItem.Size, + "source" => UsageImagesGroupByItem.Source, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResult.Json.g.verified.cs new file mode 100644 index 0000000000..84ab35e11d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageImagesResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageImagesResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageImagesResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageImagesResult), + jsonSerializerContext) as global::G.UsageImagesResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageImagesResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageImagesResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageImagesResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResult.g.verified.cs new file mode 100644 index 0000000000..cb37cae6fc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResult.g.verified.cs @@ -0,0 +1,133 @@ +//HintName: G.Models.UsageImagesResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated images usage details of the specific time bucket. + /// + public sealed partial class UsageImagesResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageImagesResultObjectJsonConverter))] + public global::G.UsageImagesResultObject Object { get; set; } + + /// + /// The number of images processed. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("images")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int Images { get; set; } + + /// + /// The count of requests made to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("num_model_requests")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int NumModelRequests { get; set; } + + /// + /// When `group_by=source`, this field provides the source of the grouped usage result, possible values are `image.generation`, `image.edit`, `image.variation`. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("source")] + public string? Source { get; set; } + + /// + /// When `group_by=size`, this field provides the image size of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("size")] + public string? Size { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("user_id")] + public string? UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of images processed. + /// + /// + /// The count of requests made to the model. 
+ /// + /// + /// When `group_by=source`, this field provides the source of the grouped usage result, possible values are `image.generation`, `image.edit`, `image.variation`. + /// + /// + /// When `group_by=size`, this field provides the image size of the grouped usage result. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageImagesResult( + int images, + int numModelRequests, + global::G.UsageImagesResultObject @object, + string? source, + string? size, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.Images = images; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.Source = source; + this.Size = size; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageImagesResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResultObject.g.verified.cs new file mode 100644 index 0000000000..0b948c61f8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesResultObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageImagesResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageImagesResultObject + { + /// + /// + /// + OrganizationUsageImagesResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageImagesResultObject value) + { + return value switch + { + UsageImagesResultObject.OrganizationUsageImagesResult => "organization.usage.images.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesResultObject? 
ToEnum(string value) + { + return value switch + { + "organization.usage.images.result" => UsageImagesResultObject.OrganizationUsageImagesResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesSize.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesSize.g.verified.cs new file mode 100644 index 0000000000..770b1fbbda --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesSize.g.verified.cs @@ -0,0 +1,70 @@ +//HintName: G.Models.UsageImagesSize.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageImagesSize + { + /// + /// + /// + x256x256, + /// + /// + /// + x512x512, + /// + /// + /// + x1024x1024, + /// + /// + /// + x1792x1792, + /// + /// + /// + x1024x1792, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesSizeExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageImagesSize value) + { + return value switch + { + UsageImagesSize.x256x256 => "256x256", + UsageImagesSize.x512x512 => "512x512", + UsageImagesSize.x1024x1024 => "1024x1024", + UsageImagesSize.x1792x1792 => "1792x1792", + UsageImagesSize.x1024x1792 => "1024x1792", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesSize? ToEnum(string value) + { + return value switch + { + "256x256" => UsageImagesSize.x256x256, + "512x512" => UsageImagesSize.x512x512, + "1024x1024" => UsageImagesSize.x1024x1024, + "1792x1792" => UsageImagesSize.x1792x1792, + "1024x1792" => UsageImagesSize.x1024x1792, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesSource.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesSource.g.verified.cs new file mode 100644 index 0000000000..1287da3e25 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageImagesSource.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageImagesSource.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageImagesSource + { + /// + /// + /// + ImageGeneration, + /// + /// + /// + ImageEdit, + /// + /// + /// + ImageVariation, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageImagesSourceExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageImagesSource value) + { + return value switch + { + UsageImagesSource.ImageGeneration => "image.generation", + UsageImagesSource.ImageEdit => "image.edit", + UsageImagesSource.ImageVariation => "image.variation", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageImagesSource? 
ToEnum(string value) + { + return value switch + { + "image.generation" => UsageImagesSource.ImageGeneration, + "image.edit" => UsageImagesSource.ImageEdit, + "image.variation" => UsageImagesSource.ImageVariation, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..bc87f9af35 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsBucketWidth.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageModerationsBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + public enum UsageModerationsBucketWidth + { + /// + /// + /// + x1m, + /// + /// + /// + x1h, + /// + /// + /// + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageModerationsBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageModerationsBucketWidth value) + { + return value switch + { + UsageModerationsBucketWidth.x1m => "1m", + UsageModerationsBucketWidth.x1h => "1h", + UsageModerationsBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageModerationsBucketWidth? ToEnum(string value) + { + return value switch + { + "1m" => UsageModerationsBucketWidth.x1m, + "1h" => UsageModerationsBucketWidth.x1h, + "1d" => UsageModerationsBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..0e11be959a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsGroupByItem.g.verified.cs @@ -0,0 +1,64 @@ +//HintName: G.Models.UsageModerationsGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageModerationsGroupByItem + { + /// + /// + /// + ProjectId, + /// + /// + /// + UserId, + /// + /// + /// + ApiKeyId, + /// + /// + /// + Model, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageModerationsGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageModerationsGroupByItem value) + { + return value switch + { + UsageModerationsGroupByItem.ProjectId => "project_id", + UsageModerationsGroupByItem.UserId => "user_id", + UsageModerationsGroupByItem.ApiKeyId => "api_key_id", + UsageModerationsGroupByItem.Model => "model", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageModerationsGroupByItem? 
ToEnum(string value) + { + return value switch + { + "project_id" => UsageModerationsGroupByItem.ProjectId, + "user_id" => UsageModerationsGroupByItem.UserId, + "api_key_id" => UsageModerationsGroupByItem.ApiKeyId, + "model" => UsageModerationsGroupByItem.Model, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResult.Json.g.verified.cs new file mode 100644 index 0000000000..de16ec2be1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageModerationsResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageModerationsResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageModerationsResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageModerationsResult), + jsonSerializerContext) as global::G.UsageModerationsResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageModerationsResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageModerationsResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageModerationsResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResult.g.verified.cs new file mode 100644 index 0000000000..48b2bb2c81 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResult.g.verified.cs @@ -0,0 +1,111 @@ +//HintName: G.Models.UsageModerationsResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated moderations usage details of the specific time bucket. + /// + public sealed partial class UsageModerationsResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageModerationsResultObjectJsonConverter))] + public global::G.UsageModerationsResultObject Object { get; set; } + + /// + /// The number of input tokens used. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("input_tokens")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int InputTokens { get; set; } + + /// + /// The count of requests made to the model. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("num_model_requests")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int NumModelRequests { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("user_id")] + public string? 
UserId { get; set; } + + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("api_key_id")] + public string? ApiKeyId { get; set; } + + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("model")] + public string? Model { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The number of input tokens used. + /// + /// + /// The count of requests made to the model. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + /// + /// When `group_by=user_id`, this field provides the user ID of the grouped usage result. + /// + /// + /// When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result. + /// + /// + /// When `group_by=model`, this field provides the model name of the grouped usage result. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageModerationsResult( + int inputTokens, + int numModelRequests, + global::G.UsageModerationsResultObject @object, + string? projectId, + string? userId, + string? apiKeyId, + string? model) + { + this.InputTokens = inputTokens; + this.NumModelRequests = numModelRequests; + this.Object = @object; + this.ProjectId = projectId; + this.UserId = userId; + this.ApiKeyId = apiKeyId; + this.Model = model; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageModerationsResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResultObject.g.verified.cs new file mode 100644 index 0000000000..25cf7560e8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageModerationsResultObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageModerationsResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageModerationsResultObject + { + /// + /// + /// + OrganizationUsageModerationsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageModerationsResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageModerationsResultObject value) + { + return value switch + { + UsageModerationsResultObject.OrganizationUsageModerationsResult => "organization.usage.moderations.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageModerationsResultObject? 
ToEnum(string value) + { + return value switch + { + "organization.usage.moderations.result" => UsageModerationsResultObject.OrganizationUsageModerationsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponse.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponse.Json.g.verified.cs new file mode 100644 index 0000000000..c7f0afeaf7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponse.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageResponse.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageResponse + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageResponse? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageResponse), + jsonSerializerContext) as global::G.UsageResponse; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageResponse? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. 
+ /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageResponse), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageResponse; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponse.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponse.g.verified.cs new file mode 100644 index 0000000000..d0c2b9372e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponse.g.verified.cs @@ -0,0 +1,73 @@ +//HintName: G.Models.UsageResponse.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class UsageResponse + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageResponseObjectJsonConverter))] + public global::G.UsageResponseObject Object { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("data")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList Data { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("has_more")] + [global::System.Text.Json.Serialization.JsonRequired] + public required bool HasMore { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("next_page")] + [global::System.Text.Json.Serialization.JsonRequired] + public required string NextPage { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageResponse( + global::System.Collections.Generic.IList data, + bool hasMore, + string nextPage, + global::G.UsageResponseObject @object) + { + this.Data = data ?? 
throw new global::System.ArgumentNullException(nameof(data)); + this.HasMore = hasMore; + this.NextPage = nextPage ?? throw new global::System.ArgumentNullException(nameof(nextPage)); + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageResponse() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponseObject.g.verified.cs similarity index 63% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesResponseObject.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponseObject.g.verified.cs index acb1ae8ee8..27405be539 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.ListFilesResponseObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageResponseObject.g.verified.cs @@ -1,4 +1,4 @@ -//HintName: G.Models.ListFilesResponseObject.g.cs +//HintName: G.Models.UsageResponseObject.g.cs #nullable enable @@ -7,38 +7,38 @@ namespace G /// /// /// - public enum ListFilesResponseObject + public enum UsageResponseObject { /// /// /// - List, + Page, } /// /// Enum extensions to do fast conversions without the reflection. /// - public static class ListFilesResponseObjectExtensions + public static class UsageResponseObjectExtensions { /// /// Converts an enum to a string. /// - public static string ToValueString(this ListFilesResponseObject value) + public static string ToValueString(this UsageResponseObject value) { return value switch { - ListFilesResponseObject.List => "list", + UsageResponseObject.Page => "page", _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), }; } /// /// Converts an string to a enum. /// - public static ListFilesResponseObject? ToEnum(string value) + public static UsageResponseObject? ToEnum(string value) { return value switch { - "list" => ListFilesResponseObject.List, + "page" => UsageResponseObject.Page, _ => null, }; } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucket.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucket.Json.g.verified.cs new file mode 100644 index 0000000000..e556ad36ec --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucket.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageTimeBucket.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageTimeBucket + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageTimeBucket? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageTimeBucket), + jsonSerializerContext) as global::G.UsageTimeBucket; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageTimeBucket? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageTimeBucket), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageTimeBucket; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucket.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucket.g.verified.cs new file mode 100644 index 0000000000..900e3d8229 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucket.g.verified.cs @@ -0,0 +1,73 @@ +//HintName: G.Models.UsageTimeBucket.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class UsageTimeBucket + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageTimeBucketObjectJsonConverter))] + public global::G.UsageTimeBucketObject Object { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("start_time")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int StartTime { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("end_time")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int EndTime { get; set; } + + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("result")] + [global::System.Text.Json.Serialization.JsonRequired] + public required global::System.Collections.Generic.IList Result { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageTimeBucket( + int startTime, + int endTime, + global::System.Collections.Generic.IList result, + global::G.UsageTimeBucketObject @object) + { + this.StartTime = startTime; + this.EndTime = endTime; + this.Result = result ?? throw new global::System.ArgumentNullException(nameof(result)); + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageTimeBucket() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketObject.g.verified.cs new file mode 100644 index 0000000000..152e57de01 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageTimeBucketObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageTimeBucketObject + { + /// + /// + /// + Bucket, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageTimeBucketObjectExtensions + { + /// + /// Converts an enum to a string. 
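A hedged sketch, not part of the snapshot, showing how the generated enum extensions directly below map between wire strings and enum members; the values are taken from the switch arms in this file.

// "bucket" is the only wire value for UsageTimeBucketObject in this snapshot.
string wire = global::G.UsageTimeBucketObjectExtensions.ToValueString(global::G.UsageTimeBucketObject.Bucket); // "bucket"
global::G.UsageTimeBucketObject? parsed  = global::G.UsageTimeBucketObjectExtensions.ToEnum("bucket");         // Bucket
global::G.UsageTimeBucketObject? unknown = global::G.UsageTimeBucketObjectExtensions.ToEnum("not-a-value");    // null for unknown strings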
+ /// + public static string ToValueString(this UsageTimeBucketObject value) + { + return value switch + { + UsageTimeBucketObject.Bucket => "bucket", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageTimeBucketObject? ToEnum(string value) + { + return value switch + { + "bucket" => UsageTimeBucketObject.Bucket, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.Json.g.verified.cs new file mode 100644 index 0000000000..eb48355b47 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageTimeBucketResultItemDiscriminator.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageTimeBucketResultItemDiscriminator + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. + /// + public static global::G.UsageTimeBucketResultItemDiscriminator? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageTimeBucketResultItemDiscriminator), + jsonSerializerContext) as global::G.UsageTimeBucketResultItemDiscriminator; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. 
Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageTimeBucketResultItemDiscriminator? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageTimeBucketResultItemDiscriminator), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageTimeBucketResultItemDiscriminator; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.g.verified.cs new file mode 100644 index 0000000000..6f2c14efa6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminator.g.verified.cs @@ -0,0 +1,43 @@ +//HintName: G.Models.UsageTimeBucketResultItemDiscriminator.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public sealed partial class UsageTimeBucketResultItemDiscriminator + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageTimeBucketResultItemDiscriminatorObjectJsonConverter))] + public global::G.UsageTimeBucketResultItemDiscriminatorObject? Object { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageTimeBucketResultItemDiscriminator( + global::G.UsageTimeBucketResultItemDiscriminatorObject? @object) + { + this.Object = @object; + } + + /// + /// Initializes a new instance of the class. 
+ /// + public UsageTimeBucketResultItemDiscriminator() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs new file mode 100644 index 0000000000..f414b98714 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs @@ -0,0 +1,94 @@ +//HintName: G.Models.UsageTimeBucketResultItemDiscriminatorObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageTimeBucketResultItemDiscriminatorObject + { + /// + /// + /// + OrganizationUsageCompletionsResult, + /// + /// + /// + OrganizationUsageEmbeddingsResult, + /// + /// + /// + OrganizationUsageModerationsResult, + /// + /// + /// + OrganizationUsageImagesResult, + /// + /// + /// + OrganizationUsageAudioSpeechesResult, + /// + /// + /// + OrganizationUsageAudioTranscriptionsResult, + /// + /// + /// + OrganizationUsageVectorStoresResult, + /// + /// + /// + OrganizationUsageCodeInterpreterSessionsResult, + /// + /// + /// + OrganizationCostsResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageTimeBucketResultItemDiscriminatorObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageTimeBucketResultItemDiscriminatorObject value) + { + return value switch + { + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCompletionsResult => "organization.usage.completions.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageEmbeddingsResult => "organization.usage.embeddings.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageModerationsResult => "organization.usage.moderations.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageImagesResult => "organization.usage.images.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioSpeechesResult => "organization.usage.audio_speeches.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioTranscriptionsResult => "organization.usage.audio_transcriptions.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageVectorStoresResult => "organization.usage.vector_stores.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCodeInterpreterSessionsResult => "organization.usage.code_interpreter_sessions.result", + UsageTimeBucketResultItemDiscriminatorObject.OrganizationCostsResult => "organization.costs.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageTimeBucketResultItemDiscriminatorObject? 
ToEnum(string value) + { + return value switch + { + "organization.usage.completions.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCompletionsResult, + "organization.usage.embeddings.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageEmbeddingsResult, + "organization.usage.moderations.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageModerationsResult, + "organization.usage.images.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageImagesResult, + "organization.usage.audio_speeches.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioSpeechesResult, + "organization.usage.audio_transcriptions.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioTranscriptionsResult, + "organization.usage.vector_stores.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageVectorStoresResult, + "organization.usage.code_interpreter_sessions.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCodeInterpreterSessionsResult, + "organization.costs.result" => UsageTimeBucketResultItemDiscriminatorObject.OrganizationCostsResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresBucketWidth.g.verified.cs new file mode 100644 index 0000000000..eb5e33b42c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresBucketWidth.g.verified.cs @@ -0,0 +1,58 @@ +//HintName: G.Models.UsageVectorStoresBucketWidth.g.cs + +#nullable enable + +namespace G +{ + /// + /// Default Value: 1d + /// + public enum UsageVectorStoresBucketWidth + { + /// + /// + /// + x1m, + /// + /// + /// + x1h, + /// + /// + /// + x1d, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageVectorStoresBucketWidthExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageVectorStoresBucketWidth value) + { + return value switch + { + UsageVectorStoresBucketWidth.x1m => "1m", + UsageVectorStoresBucketWidth.x1h => "1h", + UsageVectorStoresBucketWidth.x1d => "1d", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageVectorStoresBucketWidth? 
ToEnum(string value) + { + return value switch + { + "1m" => UsageVectorStoresBucketWidth.x1m, + "1h" => UsageVectorStoresBucketWidth.x1h, + "1d" => UsageVectorStoresBucketWidth.x1d, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresGroupByItem.g.verified.cs new file mode 100644 index 0000000000..0ec84ee26e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresGroupByItem.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageVectorStoresGroupByItem.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageVectorStoresGroupByItem + { + /// + /// + /// + ProjectId, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageVectorStoresGroupByItemExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageVectorStoresGroupByItem value) + { + return value switch + { + UsageVectorStoresGroupByItem.ProjectId => "project_id", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageVectorStoresGroupByItem? ToEnum(string value) + { + return value switch + { + "project_id" => UsageVectorStoresGroupByItem.ProjectId, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResult.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResult.Json.g.verified.cs new file mode 100644 index 0000000000..d550e862d9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResult.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: G.Models.UsageVectorStoresResult.Json.g.cs +#nullable enable + +namespace G +{ + public sealed partial class UsageVectorStoresResult + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public string ToJson( + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerContext. 
+ /// + public static global::G.UsageVectorStoresResult? FromJson( + string json, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + typeof(global::G.UsageVectorStoresResult), + jsonSerializerContext) as global::G.UsageVectorStoresResult; + } + + /// + /// Deserializes a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::G.UsageVectorStoresResult? FromJson( + string json, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.Deserialize( + json, + jsonSerializerOptions); + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerContext. + /// + public static async global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return (await global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + typeof(global::G.UsageVectorStoresResult), + jsonSerializerContext).ConfigureAwait(false)) as global::G.UsageVectorStoresResult; + } + + /// + /// Deserializes a JSON stream using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")] + [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")] +#endif + public static global::System.Threading.Tasks.ValueTask FromJsonStreamAsync( + global::System.IO.Stream jsonStream, + global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null) + { + return global::System.Text.Json.JsonSerializer.DeserializeAsync( + jsonStream, + jsonSerializerOptions); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResult.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResult.g.verified.cs new file mode 100644 index 0000000000..4a899ad5eb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResult.g.verified.cs @@ -0,0 +1,66 @@ +//HintName: G.Models.UsageVectorStoresResult.g.cs + +#nullable enable + +namespace G +{ + /// + /// The aggregated vector stores usage details of the specific time bucket. 
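A hedged sketch, not part of the snapshot, for the vector stores result model that follows: one result object is deserialized with the generated helper above and its fields are read. The payload, byte count, and project id are placeholders.

// Illustrative payload; "proj_123" and 1024 are placeholders.
var vsJson = "{\"object\":\"organization.usage.vector_stores.result\",\"usage_bytes\":1024,\"project_id\":\"proj_123\"}";
var vsResult = global::G.UsageVectorStoresResult.FromJson(vsJson);
int? bytes = vsResult?.UsageBytes;      // 1024
string? project = vsResult?.ProjectId;  // populated only when group_by=project_id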
+ /// + public sealed partial class UsageVectorStoresResult + { + /// + /// + /// + [global::System.Text.Json.Serialization.JsonPropertyName("object")] + [global::System.Text.Json.Serialization.JsonConverter(typeof(global::G.JsonConverters.UsageVectorStoresResultObjectJsonConverter))] + public global::G.UsageVectorStoresResultObject Object { get; set; } + + /// + /// The vector stores usage in bytes. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("usage_bytes")] + [global::System.Text.Json.Serialization.JsonRequired] + public required int UsageBytes { get; set; } + + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Text.Json.Serialization.JsonPropertyName("project_id")] + public string? ProjectId { get; set; } + + /// + /// Additional properties that are not explicitly defined in the schema + /// + [global::System.Text.Json.Serialization.JsonExtensionData] + public global::System.Collections.Generic.IDictionary AdditionalProperties { get; set; } = new global::System.Collections.Generic.Dictionary(); + + /// + /// Initializes a new instance of the class. + /// + /// + /// + /// The vector stores usage in bytes. + /// + /// + /// When `group_by=project_id`, this field provides the project ID of the grouped usage result. + /// + [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] + public UsageVectorStoresResult( + int usageBytes, + global::G.UsageVectorStoresResultObject @object, + string? projectId) + { + this.UsageBytes = usageBytes; + this.Object = @object; + this.ProjectId = projectId; + } + + /// + /// Initializes a new instance of the class. + /// + public UsageVectorStoresResult() + { + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResultObject.g.verified.cs new file mode 100644 index 0000000000..ea8862df4e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.UsageVectorStoresResultObject.g.verified.cs @@ -0,0 +1,46 @@ +//HintName: G.Models.UsageVectorStoresResultObject.g.cs + +#nullable enable + +namespace G +{ + /// + /// + /// + public enum UsageVectorStoresResultObject + { + /// + /// + /// + OrganizationUsageVectorStoresResult, + } + + /// + /// Enum extensions to do fast conversions without the reflection. + /// + public static class UsageVectorStoresResultObjectExtensions + { + /// + /// Converts an enum to a string. + /// + public static string ToValueString(this UsageVectorStoresResultObject value) + { + return value switch + { + UsageVectorStoresResultObject.OrganizationUsageVectorStoresResult => "organization.usage.vector_stores.result", + _ => throw new global::System.ArgumentOutOfRangeException(nameof(value), value, null), + }; + } + /// + /// Converts an string to a enum. + /// + public static UsageVectorStoresResultObject? 
ToEnum(string value) + { + return value switch + { + "organization.usage.vector_stores.result" => UsageVectorStoresResultObject.OrganizationUsageVectorStoresResult, + _ => null, + }; + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.VectorStoreObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.VectorStoreObject.g.verified.cs index 11746d3d07..1ac8ba9d49 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.VectorStoreObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.VectorStoreObject.g.verified.cs @@ -82,7 +82,7 @@ public sealed partial class VectorStoreObject public required global::System.DateTimeOffset? LastActiveAt { get; set; } /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Text.Json.Serialization.JsonPropertyName("metadata")] [global::System.Text.Json.Serialization.JsonRequired] @@ -126,7 +126,7 @@ public sealed partial class VectorStoreObject /// The Unix timestamp (in seconds) for when the vector store was last active. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// [global::System.Diagnostics.CodeAnalysis.SetsRequiredMembers] public VectorStoreObject( diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.VectorStoreObjectMetadata.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.VectorStoreObjectMetadata.g.verified.cs index d75ce94227..52ad7e3298 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.VectorStoreObjectMetadata.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.Models.VectorStoreObjectMetadata.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
/// public sealed partial class VectorStoreObjectMetadata { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModelsClient.RetrieveModel.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModelsClient.RetrieveModel.g.verified.cs index e5aa056eeb..05120c82d5 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModelsClient.RetrieveModel.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModelsClient.RetrieveModel.g.verified.cs @@ -30,7 +30,7 @@ partial void ProcessRetrieveModelResponseContent( /// /// The token to cancel the operation with /// - public async global::System.Threading.Tasks.Task RetrieveModelAsync( + public async global::System.Threading.Tasks.Task RetrieveModelAsync( string model, global::System.Threading.CancellationToken cancellationToken = default) { @@ -121,7 +121,7 @@ partial void ProcessRetrieveModelResponseContent( } return - global::G.Model12.FromJson(__content, JsonSerializerContext) ?? + global::G.Model15.FromJson(__content, JsonSerializerContext) ?? throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); } else @@ -147,7 +147,7 @@ partial void ProcessRetrieveModelResponseContent( using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); return - await global::G.Model12.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + await global::G.Model15.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? throw new global::System.InvalidOperationException("Response deserialization failed."); } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModerationsClient.CreateModeration.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModerationsClient.CreateModeration.g.verified.cs index 224c40751d..b6263c88fe 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModerationsClient.CreateModeration.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModerationsClient.CreateModeration.g.verified.cs @@ -23,7 +23,8 @@ partial void ProcessCreateModerationResponseContent( ref string content); /// - /// Classifies if text is potentially harmful. + /// Classifies if text and/or image inputs are potentially harmful. Learn
+ /// more in the [moderation guide](/docs/guides/moderation). ///
/// /// The token to cancel the operation with @@ -159,21 +160,24 @@ partial void ProcessCreateModerationResponseContent( } /// - /// Classifies if text is potentially harmful. + /// Classifies if text and/or image inputs are potentially harmful. Learn
+ /// more in the [moderation guide](/docs/guides/moderation). ///
/// - /// The input text to classify + /// Input (or inputs) to classify. Can be a single string, an array of strings, or
+ /// an array of multi-modal input objects similar to other models. /// /// - /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
- /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- /// Default Value: text-moderation-latest
- /// Example: text-moderation-stable + /// The content moderation model you would like to use. Learn more in
+ /// [the moderation guide](/docs/guides/moderation), and learn about
+ /// available models [here](/docs/models#moderation).
+ /// Default Value: omni-moderation-latest
+ /// Example: omni-moderation-2024-09-26 /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task CreateModerationAsync( - global::G.OneOf> input, + global::G.OneOf, global::System.Collections.Generic.IList> input, global::G.AnyOf? model = default, global::System.Threading.CancellationToken cancellationToken = default) { diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModerationsClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModerationsClient.g.verified.cs index c06be3e524..b2c6e7d2e0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModerationsClient.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ModerationsClient.g.verified.cs @@ -5,7 +5,7 @@ namespace G { /// - /// Given a input text, outputs if the model classifies it as potentially harmful.
+ /// Given text and/or image inputs, classifies if those inputs are potentially harmful.
/// If no httpClient is provided, a new one will be created.
/// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. ///
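A hedged usage sketch for the reworked moderation call above. It assumes an already configured ModerationsClient and assumes the generated OneOf/AnyOf unions accept an implicit conversion from string, which is typical for this generator but not visible in this snapshot; adapt to the actual union API if it differs. The model name is the documented default and the input text is a placeholder.

using System.Threading.Tasks;

public static class ModerationSketch
{
    public static async Task ClassifyAsync(global::G.ModerationsClient moderations)
    {
        // Single-string input; the same parameter is documented to also accept an array of
        // strings or multi-modal input objects (assumption: implicit conversion from string).
        await moderations.CreateModerationAsync(
            input: "sample text to classify",
            model: "omni-moderation-latest");
    }
}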
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.OpenAiClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.OpenAiClient.g.verified.cs index 0f8bd25621..aa4f4f4e8d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.OpenAiClient.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.OpenAiClient.g.verified.cs @@ -138,7 +138,7 @@ public sealed partial class OpenAiClient : global::G.IOpenAiClient, global::Syst }; /// - /// Given a input text, outputs if the model classifies it as potentially harmful. + /// Given text and/or image inputs, classifies if those inputs are potentially harmful. /// public ModerationsClient Moderations => new ModerationsClient(HttpClient, authorizations: Authorizations) { @@ -158,7 +158,7 @@ public sealed partial class OpenAiClient : global::G.IOpenAiClient, global::Syst /// /// /// - public VectorStoresClient VectorStores => new VectorStoresClient(HttpClient, authorizations: Authorizations) + public UsageClient Usage => new UsageClient(HttpClient, authorizations: Authorizations) { ReadResponseAsString = ReadResponseAsString, JsonSerializerContext = JsonSerializerContext, @@ -173,6 +173,15 @@ public sealed partial class OpenAiClient : global::G.IOpenAiClient, global::Syst JsonSerializerContext = JsonSerializerContext, }; + /// + /// + /// + public ProjectsClient Projects => new ProjectsClient(HttpClient, authorizations: Authorizations) + { + ReadResponseAsString = ReadResponseAsString, + JsonSerializerContext = JsonSerializerContext, + }; + /// /// /// @@ -185,7 +194,7 @@ public sealed partial class OpenAiClient : global::G.IOpenAiClient, global::Syst /// /// /// - public ProjectsClient Projects => new ProjectsClient(HttpClient, authorizations: Authorizations) + public VectorStoresClient VectorStores => new VectorStoresClient(HttpClient, authorizations: Authorizations) { ReadResponseAsString = ReadResponseAsString, JsonSerializerContext = JsonSerializerContext, diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ListProjectRateLimits.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ListProjectRateLimits.g.verified.cs new file mode 100644 index 0000000000..ce022e699a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ListProjectRateLimits.g.verified.cs @@ -0,0 +1,178 @@ +//HintName: G.ProjectsClient.ListProjectRateLimits.g.cs + +#nullable enable + +namespace G +{ + public partial class ProjectsClient + { + partial void PrepareListProjectRateLimitsArguments( + global::System.Net.Http.HttpClient httpClient, + ref string projectId, + ref int? limit, + ref string? after, + ref string? before); + partial void PrepareListProjectRateLimitsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, + int? limit, + string? after, + string? before); + partial void ProcessListProjectRateLimitsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessListProjectRateLimitsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Returns the rate limits per model for a project. 
+ /// + /// + /// + /// Default Value: 100 + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task ListProjectRateLimitsAsync( + string projectId, + int? limit = default, + string? after = default, + string? before = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareListProjectRateLimitsArguments( + httpClient: HttpClient, + projectId: ref projectId, + limit: ref limit, + after: ref after, + before: ref before); + + var __pathBuilder = new PathBuilder( + path: $"/organization/projects/{projectId}/rate_limits", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("after", after) + .AddOptionalParameter("before", before) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareListProjectRateLimitsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + projectId: projectId, + limit: limit, + after: after, + before: before); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessListProjectRateLimitsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessListProjectRateLimitsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.ProjectRateLimitListResponse.FromJson(__content, JsonSerializerContext) ?? 
+ throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.ProjectRateLimitListResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ModifyProject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ModifyProject.g.verified.cs index 68564a3cbe..122302af3b 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ModifyProject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ModifyProject.g.verified.cs @@ -8,10 +8,12 @@ public partial class ProjectsClient { partial void PrepareModifyProjectArguments( global::System.Net.Http.HttpClient httpClient, + ref string projectId, global::G.ProjectUpdateRequest request); partial void PrepareModifyProjectRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, global::G.ProjectUpdateRequest request); partial void ProcessModifyProjectResponse( global::System.Net.Http.HttpClient httpClient, @@ -25,10 +27,12 @@ partial void ProcessModifyProjectResponseContent( /// /// Modifies a project in the organization. /// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, global::G.ProjectUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -38,10 +42,11 @@ partial void ProcessModifyProjectResponseContent( client: HttpClient); PrepareModifyProjectArguments( httpClient: HttpClient, + projectId: ref projectId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/projects/{project_id}", + path: $"/organization/projects/{projectId}", baseUri: HttpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -80,6 +85,7 @@ partial void ProcessModifyProjectResponseContent( PrepareModifyProjectRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, + projectId: projectId, request: request); using var __response = await HttpClient.SendAsync( @@ -189,12 +195,14 @@ partial void ProcessModifyProjectResponseContent( /// /// Modifies a project in the organization. /// + /// /// /// The updated name of the project, this name appears in reports. 
/// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectAsync( + string projectId, string name, global::System.Threading.CancellationToken cancellationToken = default) { @@ -204,6 +212,7 @@ partial void ProcessModifyProjectResponseContent( }; return await ModifyProjectAsync( + projectId: projectId, request: __request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ModifyProjectUser.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ModifyProjectUser.g.verified.cs index 74b387fbb0..93bae0e7e6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ModifyProjectUser.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.ModifyProjectUser.g.verified.cs @@ -8,10 +8,14 @@ public partial class ProjectsClient { partial void PrepareModifyProjectUserArguments( global::System.Net.Http.HttpClient httpClient, + ref string projectId, + ref string userId, global::G.ProjectUserUpdateRequest request); partial void PrepareModifyProjectUserRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, + string userId, global::G.ProjectUserUpdateRequest request); partial void ProcessModifyProjectUserResponse( global::System.Net.Http.HttpClient httpClient, @@ -25,10 +29,14 @@ partial void ProcessModifyProjectUserResponseContent( /// /// Modifies a user's role in the project. /// + /// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::G.ProjectUserUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -38,10 +46,12 @@ partial void ProcessModifyProjectUserResponseContent( client: HttpClient); PrepareModifyProjectUserArguments( httpClient: HttpClient, + projectId: ref projectId, + userId: ref userId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/projects/{project_id}/users/{user_id}", + path: $"/organization/projects/{projectId}/users/{userId}", baseUri: HttpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -80,6 +90,8 @@ partial void ProcessModifyProjectUserResponseContent( PrepareModifyProjectUserRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, + projectId: projectId, + userId: userId, request: request); using var __response = await HttpClient.SendAsync( @@ -189,12 +201,16 @@ partial void ProcessModifyProjectUserResponseContent( /// /// Modifies a user's role in the project. 
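A hedged sketch, not part of the snapshot, tying together the project management calls added in this diff. It assumes an already configured ProjectsClient (for instance the OpenAiClient.Projects property wired up earlier); the project id is a placeholder, and return values are ignored because the generic return types are elided in this snapshot.

using System.Threading.Tasks;

public static class ProjectsSketch
{
    public static async Task ManageAsync(global::G.ProjectsClient projects)
    {
        // projectId is now a required path parameter ("proj_123" is a placeholder id).
        await projects.ModifyProjectAsync(projectId: "proj_123", name: "Renamed project");

        // Page through the project's per-model rate limits; the limit parameter defaults to 100.
        await projects.ListProjectRateLimitsAsync(projectId: "proj_123", limit: 20);
    }
}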
/// + /// + /// /// /// `owner` or `member` /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyProjectUserAsync( + string projectId, + string userId, global::G.ProjectUserUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default) { @@ -204,6 +220,8 @@ partial void ProcessModifyProjectUserResponseContent( }; return await ModifyProjectUserAsync( + projectId: projectId, + userId: userId, request: __request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.UpdateProjectRateLimits.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.UpdateProjectRateLimits.g.verified.cs new file mode 100644 index 0000000000..7124085bf4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.ProjectsClient.UpdateProjectRateLimits.g.verified.cs @@ -0,0 +1,254 @@ +//HintName: G.ProjectsClient.UpdateProjectRateLimits.g.cs + +#nullable enable + +namespace G +{ + public partial class ProjectsClient + { + partial void PrepareUpdateProjectRateLimitsArguments( + global::System.Net.Http.HttpClient httpClient, + ref string projectId, + ref string rateLimitId, + global::G.ProjectRateLimitUpdateRequest request); + partial void PrepareUpdateProjectRateLimitsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string projectId, + string rateLimitId, + global::G.ProjectRateLimitUpdateRequest request); + partial void ProcessUpdateProjectRateLimitsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUpdateProjectRateLimitsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Updates a project rate limit. + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UpdateProjectRateLimitsAsync( + string projectId, + string rateLimitId, + global::G.ProjectRateLimitUpdateRequest request, + global::System.Threading.CancellationToken cancellationToken = default) + { + request = request ?? 
throw new global::System.ArgumentNullException(nameof(request)); + + PrepareArguments( + client: HttpClient); + PrepareUpdateProjectRateLimitsArguments( + httpClient: HttpClient, + projectId: ref projectId, + rateLimitId: ref rateLimitId, + request: request); + + var __pathBuilder = new PathBuilder( + path: $"/organization/projects/{projectId}/rate_limits/{rateLimitId}", + baseUri: HttpClient.BaseAddress); + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Post, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + var __httpRequestContentBody = request.ToJson(JsonSerializerContext); + var __httpRequestContent = new global::System.Net.Http.StringContent( + content: __httpRequestContentBody, + encoding: global::System.Text.Encoding.UTF8, + mediaType: "application/json"); + __httpRequest.Content = __httpRequestContent; + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUpdateProjectRateLimitsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + projectId: projectId, + rateLimitId: rateLimitId, + request: request); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUpdateProjectRateLimitsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + // Error response for various conditions. + if ((int)__response.StatusCode == 400) + { + string? __content_400 = null; + global::G.ErrorResponse? __value_400 = null; + if (ReadResponseAsString) + { + __content_400 = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + __value_400 = global::G.ErrorResponse.FromJson(__content_400, JsonSerializerContext); + } + else + { + var __contentStream_400 = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + __value_400 = await global::G.ErrorResponse.FromJsonStreamAsync(__contentStream_400, JsonSerializerContext).ConfigureAwait(false); + } + + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? 
string.Empty, + statusCode: __response.StatusCode) + { + ResponseBody = __content_400, + ResponseObject = __value_400, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUpdateProjectRateLimitsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.ProjectRateLimit.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.ProjectRateLimit.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + + /// + /// Updates a project rate limit. + /// + /// + /// + /// + /// The maximum requests per minute. + /// + /// + /// The maximum tokens per minute. + /// + /// + /// The maximum images per minute. Only relevant for certain models. + /// + /// + /// The maximum audio megabytes per minute. Only relevant for certain models. + /// + /// + /// The maximum requests per day. Only relevant for certain models. + /// + /// + /// The maximum batch input tokens per day. Only relevant for certain models. + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UpdateProjectRateLimitsAsync( + string projectId, + string rateLimitId, + int? maxRequestsPer1Minute = default, + int? maxTokensPer1Minute = default, + int? maxImagesPer1Minute = default, + int? maxAudioMegabytesPer1Minute = default, + int? maxRequestsPer1Day = default, + int? 
batch1DayMaxInputTokens = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + var __request = new global::G.ProjectRateLimitUpdateRequest + { + MaxRequestsPer1Minute = maxRequestsPer1Minute, + MaxTokensPer1Minute = maxTokensPer1Minute, + MaxImagesPer1Minute = maxImagesPer1Minute, + MaxAudioMegabytesPer1Minute = maxAudioMegabytesPer1Minute, + MaxRequestsPer1Day = maxRequestsPer1Day, + Batch1DayMaxInputTokens = batch1DayMaxInputTokens, + }; + + return await UpdateProjectRateLimitsAsync( + projectId: projectId, + rateLimitId: rateLimitId, + request: __request, + cancellationToken: cancellationToken).ConfigureAwait(false); + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UploadsClient.CreateUpload.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UploadsClient.CreateUpload.g.verified.cs index 839d460eec..0047c99b93 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UploadsClient.CreateUpload.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UploadsClient.CreateUpload.g.verified.cs @@ -26,7 +26,7 @@ partial void ProcessCreateUploadResponseContent( /// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.
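As an aside to the UpdateProjectRateLimits snapshot above: the generated client is declared `partial` and exposes `partial void PrepareUpdateProjectRateLimitsRequest(...)` before the request is sent, so a consuming project can hook the outgoing request in its own partial class. A minimal, hypothetical sketch only (not part of the generated snapshot; the hook signature is copied from the snapshot above, and the header added here is invented for illustration):

// Illustrative consumer-side sketch; not generated code.
#nullable enable

namespace G
{
    public partial class ProjectsClient
    {
        partial void PrepareUpdateProjectRateLimitsRequest(
            global::System.Net.Http.HttpClient httpClient,
            global::System.Net.Http.HttpRequestMessage httpRequestMessage,
            string projectId,
            string rateLimitId,
            global::G.ProjectRateLimitUpdateRequest request)
        {
            // Runs after the generated code has built the POST request for
            // /organization/projects/{projectId}/rate_limits/{rateLimitId} and before it is sent.
            // The header name is a made-up example.
            httpRequestMessage.Headers.TryAddWithoutValidation(
                "X-Example-Trace", $"{projectId}/{rateLimitId}");
        }
    }
}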
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.<br/>
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:<br/>
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)<br/>
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)<br/>
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).
/// </summary>
///
@@ -166,7 +166,7 @@ partial void ProcessCreateUploadResponseContent(
/// Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.<br/>
/// Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.<br/>
/// For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:<br/>
- /// - [Assistants](/docs/assistants/tools/file-search/supported-files)<br/>
+ /// - [Assistants](/docs/assistants/tools/file-search#supported-files)<br/>
/// For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). /// /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageAudioSpeeches.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageAudioSpeeches.g.verified.cs new file mode 100644 index 0000000000..3ce897a105 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageAudioSpeeches.g.verified.cs @@ -0,0 +1,220 @@ +//HintName: G.UsageClient.UsageAudioSpeeches.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageAudioSpeechesArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageAudioSpeechesBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageAudioSpeechesRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageAudioSpeechesBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageAudioSpeechesResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageAudioSpeechesResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get audio speeches usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageAudioSpeechesAsync( + int startTime, + int? endTime = default, + global::G.UsageAudioSpeechesBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageAudioSpeechesArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/usage/audio_speeches", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageAudioSpeechesRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageAudioSpeechesResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageAudioSpeechesResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? 
string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageAudioTranscriptions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageAudioTranscriptions.g.verified.cs new file mode 100644 index 0000000000..0e5657d159 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageAudioTranscriptions.g.verified.cs @@ -0,0 +1,220 @@ +//HintName: G.UsageClient.UsageAudioTranscriptions.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageAudioTranscriptionsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageAudioTranscriptionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageAudioTranscriptionsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageAudioTranscriptionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageAudioTranscriptionsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageAudioTranscriptionsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get audio transcriptions usage details for the organization. 
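As an aside to the UsageAudioTranscriptions snapshot below: a minimal, hypothetical consumer-side sketch of calling the generated method. It assumes an already-constructed `G.UsageClient` (its constructor is not part of this diff) and that `startTime` is a Unix timestamp in seconds, as in the underlying Usage API; only the required parameter plus `limit` are passed.

// Hypothetical usage sketch; not part of the generated snapshot files.
using System;

public static class UsageAudioTranscriptionsExample
{
    public static async System.Threading.Tasks.Task RunAsync(G.UsageClient client)
    {
        // Look back 24 hours; start_time is assumed to be Unix seconds.
        var startTime = (int)DateTimeOffset.UtcNow.AddDays(-1).ToUnixTimeSeconds();

        // Only startTime is required; the remaining query parameters are optional and default to null.
        var usage = await client.UsageAudioTranscriptionsAsync(
            startTime: startTime,
            limit: 7);

        // The generated client deserializes the response body into a G.UsageResponse.
        Console.WriteLine($"Usage response: {usage}");
    }
}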
+ /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageAudioTranscriptionsAsync( + int startTime, + int? endTime = default, + global::G.UsageAudioTranscriptionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageAudioTranscriptionsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/usage/audio_transcriptions", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageAudioTranscriptionsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageAudioTranscriptionsResponse( + httpClient: 
HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageAudioTranscriptionsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCodeInterpreterSessions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCodeInterpreterSessions.g.verified.cs new file mode 100644 index 0000000000..fb2f2b34a0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCodeInterpreterSessions.g.verified.cs @@ -0,0 +1,199 @@ +//HintName: G.UsageClient.UsageCodeInterpreterSessions.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageCodeInterpreterSessionsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageCodeInterpreterSessionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageCodeInterpreterSessionsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageCodeInterpreterSessionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? 
page); + partial void ProcessUsageCodeInterpreterSessionsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageCodeInterpreterSessionsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get code interpreter sessions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageCodeInterpreterSessionsAsync( + int startTime, + int? endTime = default, + global::G.UsageCodeInterpreterSessionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageCodeInterpreterSessionsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/usage/code_interpreter_sessions", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageCodeInterpreterSessionsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageCodeInterpreterSessionsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if 
(ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageCodeInterpreterSessionsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCompletions.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCompletions.g.verified.cs new file mode 100644 index 0000000000..fa29a24a04 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCompletions.g.verified.cs @@ -0,0 +1,227 @@ +//HintName: G.UsageClient.UsageCompletions.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageCompletionsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageCompletionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + ref bool? batch, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageCompletionsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageCompletionsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + bool? batch, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? 
page); + partial void ProcessUsageCompletionsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageCompletionsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get completions usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageCompletionsAsync( + int startTime, + int? endTime = default, + global::G.UsageCompletionsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + bool? batch = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageCompletionsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + batch: ref batch, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/usage/completions", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("batch", batch?.ToString()) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageCompletionsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, 
+ projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + batch: batch, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageCompletionsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageCompletionsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCosts.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCosts.g.verified.cs new file mode 100644 index 0000000000..ac221cf203 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageCosts.g.verified.cs @@ -0,0 +1,201 @@ +//HintName: G.UsageClient.UsageCosts.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageCostsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageCostsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageCostsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageCostsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + int? 
limit, + string? page); + partial void ProcessUsageCostsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageCostsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get costs details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// Default Value: 7 + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageCostsAsync( + int startTime, + int? endTime = default, + global::G.UsageCostsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageCostsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/costs", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageCostsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageCostsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + 
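+ // When ReadResponseAsString is enabled, the raw JSON is first exposed (by ref) to the
+ // ProcessResponseContent / ProcessUsageCostsResponseContent partial hooks, so a consuming
+ // partial class can inspect or rewrite it before it is deserialized with
+ // global::G.UsageResponse.FromJson; on a non-success status the same string is attached
+ // to the thrown ApiException as ResponseBody.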
ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageCostsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageEmbeddings.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageEmbeddings.g.verified.cs new file mode 100644 index 0000000000..8577a0e97d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageEmbeddings.g.verified.cs @@ -0,0 +1,220 @@ +//HintName: G.UsageClient.UsageEmbeddings.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageEmbeddingsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageEmbeddingsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageEmbeddingsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageEmbeddingsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? 
page); + partial void ProcessUsageEmbeddingsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageEmbeddingsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get embeddings usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageEmbeddingsAsync( + int startTime, + int? endTime = default, + global::G.UsageEmbeddingsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageEmbeddingsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/usage/embeddings", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageEmbeddingsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + 
limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageEmbeddingsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageEmbeddingsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageImages.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageImages.g.verified.cs new file mode 100644 index 0000000000..ca2fedbe64 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageImages.g.verified.cs @@ -0,0 +1,232 @@ +//HintName: G.UsageClient.UsageImages.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageImagesArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageImagesBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? sources, + global::System.Collections.Generic.IList? sizes, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageImagesRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? 
endTime, + global::G.UsageImagesBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? sources, + global::System.Collections.Generic.IList? sizes, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageImagesResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageImagesResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get images usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageImagesAsync( + int startTime, + int? endTime = default, + global::G.UsageImagesBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? sources = default, + global::System.Collections.Generic.IList? sizes = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageImagesArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + sources: sources, + sizes: sizes, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/usage/images", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new 
global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageImagesRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + sources: sources, + sizes: sizes, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageImagesResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageImagesResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? 
+ throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageModerations.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageModerations.g.verified.cs new file mode 100644 index 0000000000..17fb6ff51d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageModerations.g.verified.cs @@ -0,0 +1,220 @@ +//HintName: G.UsageClient.UsageModerations.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageModerationsArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageModerationsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageModerationsRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageModerationsBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? userIds, + global::System.Collections.Generic.IList? apiKeyIds, + global::System.Collections.Generic.IList? models, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageModerationsResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageModerationsResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get moderations usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageModerationsAsync( + int startTime, + int? endTime = default, + global::G.UsageModerationsBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? userIds = default, + global::System.Collections.Generic.IList? apiKeyIds = default, + global::System.Collections.Generic.IList? models = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageModerationsArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/usage/moderations", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("user_ids", userIds, delimiter: ",", explode: true) + .AddOptionalParameter("api_key_ids", apiKeyIds, delimiter: ",", explode: true) + .AddOptionalParameter("models", models, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageModerationsRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + userIds: userIds, + apiKeyIds: apiKeyIds, + models: models, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageModerationsResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageModerationsResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? 
string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? + throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageVectorStores.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageVectorStores.g.verified.cs new file mode 100644 index 0000000000..73f3e0f19b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.UsageVectorStores.g.verified.cs @@ -0,0 +1,199 @@ +//HintName: G.UsageClient.UsageVectorStores.g.cs + +#nullable enable + +namespace G +{ + public partial class UsageClient + { + partial void PrepareUsageVectorStoresArguments( + global::System.Net.Http.HttpClient httpClient, + ref int startTime, + ref int? endTime, + ref global::G.UsageVectorStoresBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + ref int? limit, + ref string? page); + partial void PrepareUsageVectorStoresRequest( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpRequestMessage httpRequestMessage, + int startTime, + int? endTime, + global::G.UsageVectorStoresBucketWidth? bucketWidth, + global::System.Collections.Generic.IList? projectIds, + global::System.Collections.Generic.IList? groupBy, + int? limit, + string? page); + partial void ProcessUsageVectorStoresResponse( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage); + + partial void ProcessUsageVectorStoresResponseContent( + global::System.Net.Http.HttpClient httpClient, + global::System.Net.Http.HttpResponseMessage httpResponseMessage, + ref string content); + + /// + /// Get vector stores usage details for the organization. + /// + /// + /// + /// + /// Default Value: 1d + /// + /// + /// + /// + /// + /// The token to cancel the operation with + /// + public async global::System.Threading.Tasks.Task UsageVectorStoresAsync( + int startTime, + int? endTime = default, + global::G.UsageVectorStoresBucketWidth? bucketWidth = default, + global::System.Collections.Generic.IList? projectIds = default, + global::System.Collections.Generic.IList? groupBy = default, + int? limit = default, + string? 
page = default, + global::System.Threading.CancellationToken cancellationToken = default) + { + PrepareArguments( + client: HttpClient); + PrepareUsageVectorStoresArguments( + httpClient: HttpClient, + startTime: ref startTime, + endTime: ref endTime, + bucketWidth: ref bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: ref limit, + page: ref page); + + var __pathBuilder = new PathBuilder( + path: "/organization/usage/vector_stores", + baseUri: HttpClient.BaseAddress); + __pathBuilder + .AddRequiredParameter("start_time", startTime.ToString()) + .AddOptionalParameter("end_time", endTime?.ToString()) + .AddOptionalParameter("bucket_width", bucketWidth?.ToValueString()) + .AddOptionalParameter("project_ids", projectIds, delimiter: ",", explode: true) + .AddOptionalParameter("limit", limit?.ToString()) + .AddOptionalParameter("page", page) + ; + var __path = __pathBuilder.ToString(); + using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( + method: global::System.Net.Http.HttpMethod.Get, + requestUri: new global::System.Uri(__path, global::System.UriKind.RelativeOrAbsolute)); +#if NET6_0_OR_GREATER + __httpRequest.Version = global::System.Net.HttpVersion.Version11; + __httpRequest.VersionPolicy = global::System.Net.Http.HttpVersionPolicy.RequestVersionOrHigher; +#endif + + foreach (var __authorization in Authorizations) + { + if (__authorization.Type == "Http" || + __authorization.Type == "OAuth2") + { + __httpRequest.Headers.Authorization = new global::System.Net.Http.Headers.AuthenticationHeaderValue( + scheme: __authorization.Name, + parameter: __authorization.Value); + } + else if (__authorization.Type == "ApiKey" && + __authorization.Location == "Header") + { + __httpRequest.Headers.Add(__authorization.Name, __authorization.Value); + } + } + + PrepareRequest( + client: HttpClient, + request: __httpRequest); + PrepareUsageVectorStoresRequest( + httpClient: HttpClient, + httpRequestMessage: __httpRequest, + startTime: startTime, + endTime: endTime, + bucketWidth: bucketWidth, + projectIds: projectIds, + groupBy: groupBy, + limit: limit, + page: page); + + using var __response = await HttpClient.SendAsync( + request: __httpRequest, + completionOption: global::System.Net.Http.HttpCompletionOption.ResponseContentRead, + cancellationToken: cancellationToken).ConfigureAwait(false); + + ProcessResponse( + client: HttpClient, + response: __response); + ProcessUsageVectorStoresResponse( + httpClient: HttpClient, + httpResponseMessage: __response); + + if (ReadResponseAsString) + { + var __content = await __response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); + + ProcessResponseContent( + client: HttpClient, + response: __response, + content: ref __content); + ProcessUsageVectorStoresResponseContent( + httpClient: HttpClient, + httpResponseMessage: __response, + content: ref __content); + + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __content ?? __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseBody = __content, + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + return + global::G.UsageResponse.FromJson(__content, JsonSerializerContext) ?? 
+ throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" "); + } + else + { + try + { + __response.EnsureSuccessStatusCode(); + } + catch (global::System.Net.Http.HttpRequestException __ex) + { + throw new global::G.ApiException( + message: __response.ReasonPhrase ?? string.Empty, + innerException: __ex, + statusCode: __response.StatusCode) + { + ResponseHeaders = global::System.Linq.Enumerable.ToDictionary( + __response.Headers, + h => h.Key, + h => h.Value), + }; + } + + using var __content = await __response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false); + + return + await global::G.UsageResponse.FromJsonStreamAsync(__content, JsonSerializerContext).ConfigureAwait(false) ?? + throw new global::System.InvalidOperationException("Response deserialization failed."); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.g.verified.cs new file mode 100644 index 0000000000..d16efa9181 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsageClient.g.verified.cs @@ -0,0 +1,87 @@ +//HintName: G.UsageClient.g.cs + +#nullable enable + +namespace G +{ + /// + /// If no httpClient is provided, a new one will be created.
+ /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + /// </summary>
+ public sealed partial class UsageClient : global::G.IUsageClient, global::System.IDisposable + { + /// + /// + /// + public const string DefaultBaseUrl = "https://api.openai.com/v1"; + + private bool _disposeHttpClient = true; + + /// + public global::System.Net.Http.HttpClient HttpClient { get; } + + /// + public System.Uri? BaseUri => HttpClient.BaseAddress; + + /// + public global::System.Collections.Generic.List Authorizations { get; } + + /// + public bool ReadResponseAsString { get; set; } +#if DEBUG + = true; +#endif + /// + /// + /// + public global::System.Text.Json.Serialization.JsonSerializerContext JsonSerializerContext { get; set; } = global::G.SourceGenerationContext.Default; + + + /// + /// Creates a new instance of the UsageClient. + /// If no httpClient is provided, a new one will be created. + /// If no baseUri is provided, the default baseUri from OpenAPI spec will be used. + /// + /// The HttpClient instance. If not provided, a new one will be created. + /// The base URL for the API. If not provided, the default baseUri from OpenAPI spec will be used. + /// The authorizations to use for the requests. + /// Dispose the HttpClient when the instance is disposed. True by default. + public UsageClient( + global::System.Net.Http.HttpClient? httpClient = null, + global::System.Uri? baseUri = null, + global::System.Collections.Generic.List? authorizations = null, + bool disposeHttpClient = true) + { + HttpClient = httpClient ?? new global::System.Net.Http.HttpClient(); + HttpClient.BaseAddress ??= baseUri ?? new global::System.Uri(DefaultBaseUrl); + Authorizations = authorizations ?? new global::System.Collections.Generic.List(); + _disposeHttpClient = disposeHttpClient; + + Initialized(HttpClient); + } + + /// + public void Dispose() + { + if (_disposeHttpClient) + { + HttpClient.Dispose(); + } + } + + partial void Initialized( + global::System.Net.Http.HttpClient client); + partial void PrepareArguments( + global::System.Net.Http.HttpClient client); + partial void PrepareRequest( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpRequestMessage request); + partial void ProcessResponse( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response); + partial void ProcessResponseContent( + global::System.Net.Http.HttpClient client, + global::System.Net.Http.HttpResponseMessage response, + ref string content); + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsersClient.ModifyUser.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsersClient.ModifyUser.g.verified.cs index cbfc44c630..ef732e8199 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsersClient.ModifyUser.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.UsersClient.ModifyUser.g.verified.cs @@ -8,10 +8,12 @@ public partial class UsersClient { partial void PrepareModifyUserArguments( global::System.Net.Http.HttpClient httpClient, + ref string userId, global::G.UserRoleUpdateRequest request); partial void PrepareModifyUserRequest( global::System.Net.Http.HttpClient httpClient, global::System.Net.Http.HttpRequestMessage httpRequestMessage, + string userId, global::G.UserRoleUpdateRequest request); partial void ProcessModifyUserResponse( global::System.Net.Http.HttpClient httpClient, @@ -25,10 +27,12 @@ partial void ProcessModifyUserResponseContent( /// /// Modifies a user's role in the 
organization. /// + /// /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::G.UserRoleUpdateRequest request, global::System.Threading.CancellationToken cancellationToken = default) { @@ -38,10 +42,11 @@ partial void ProcessModifyUserResponseContent( client: HttpClient); PrepareModifyUserArguments( httpClient: HttpClient, + userId: ref userId, request: request); var __pathBuilder = new PathBuilder( - path: "/organization/users/{user_id}", + path: $"/organization/users/{userId}", baseUri: HttpClient.BaseAddress); var __path = __pathBuilder.ToString(); using var __httpRequest = new global::System.Net.Http.HttpRequestMessage( @@ -80,6 +85,7 @@ partial void ProcessModifyUserResponseContent( PrepareModifyUserRequest( httpClient: HttpClient, httpRequestMessage: __httpRequest, + userId: userId, request: request); using var __response = await HttpClient.SendAsync( @@ -161,12 +167,14 @@ partial void ProcessModifyUserResponseContent( /// /// Modifies a user's role in the organization. /// + /// /// /// `owner` or `reader` /// /// The token to cancel the operation with /// public async global::System.Threading.Tasks.Task ModifyUserAsync( + string userId, global::G.UserRoleUpdateRequestRole role, global::System.Threading.CancellationToken cancellationToken = default) { @@ -176,6 +184,7 @@ partial void ProcessModifyUserResponseContent( }; return await ModifyUserAsync( + userId: userId, request: __request, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.VectorStoresClient.CreateVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.VectorStoresClient.CreateVectorStore.g.verified.cs index 174dafa967..7a3a4e340a 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.VectorStoresClient.CreateVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.VectorStoresClient.CreateVectorStore.g.verified.cs @@ -175,7 +175,7 @@ partial void ProcessCreateVectorStoreResponseContent( /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. /// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.VectorStoresClient.ModifyVectorStore.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.VectorStoresClient.ModifyVectorStore.g.verified.cs index 3084df5b3c..d0390321bc 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.VectorStoresClient.ModifyVectorStore.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#G.VectorStoresClient.ModifyVectorStore.g.verified.cs @@ -176,7 +176,7 @@ partial void ProcessModifyVectorStoreResponseContent( /// The expiration policy for a vector store. 
/// /// - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. /// /// The token to cancel the operation with /// diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.AudioResponseFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.AudioResponseFormat.g.verified.cs new file mode 100644 index 0000000000..83a519c209 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.AudioResponseFormat.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.AudioResponseFormat.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class AudioResponseFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.AudioResponseFormat Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.AudioResponseFormatExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.AudioResponseFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.AudioResponseFormat value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.AudioResponseFormatExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.AudioResponseFormatNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.AudioResponseFormatNullable.g.verified.cs new file mode 100644 index 0000000000..5a846e9126 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.AudioResponseFormatNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.AudioResponseFormatNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class AudioResponseFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.AudioResponseFormat? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.AudioResponseFormatExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.AudioResponseFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.AudioResponseFormat? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.AudioResponseFormatExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionModalitie.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionModalitie.g.verified.cs new file mode 100644 index 0000000000..ece466ffff --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionModalitie.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.ChatCompletionModalitie.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ChatCompletionModalitieJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ChatCompletionModalitie Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ChatCompletionModalitieExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ChatCompletionModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ChatCompletionModalitie value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.ChatCompletionModalitieExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesResponseObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionModalitieNullable.g.verified.cs similarity index 74% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesResponseObjectNullable.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionModalitieNullable.g.verified.cs index 6801aa9589..ad718604a5 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesResponseObjectNullable.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionModalitieNullable.g.verified.cs @@ -1,13 +1,13 @@ -//HintName: JsonConverters.ListFilesResponseObjectNullable.g.cs +//HintName: JsonConverters.ChatCompletionModalitieNullable.g.cs #nullable enable namespace G.JsonConverters { /// - public sealed class ListFilesResponseObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + public sealed class ChatCompletionModalitieNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter { /// - public override global::G.ListFilesResponseObject? Read( + public override global::G.ChatCompletionModalitie? Read( ref global::System.Text.Json.Utf8JsonReader reader, global::System.Type typeToConvert, global::System.Text.Json.JsonSerializerOptions options) @@ -19,7 +19,7 @@ public sealed class ListFilesResponseObjectNullableJsonConverter : global::Syste var stringValue = reader.GetString(); if (stringValue != null) { - return global::G.ListFilesResponseObjectExtensions.ToEnum(stringValue); + return global::G.ChatCompletionModalitieExtensions.ToEnum(stringValue); } break; @@ -27,7 +27,7 @@ public sealed class ListFilesResponseObjectNullableJsonConverter : global::Syste case global::System.Text.Json.JsonTokenType.Number: { var numValue = reader.GetInt32(); - return (global::G.ListFilesResponseObject)numValue; + return (global::G.ChatCompletionModalitie)numValue; } default: throw new global::System.ArgumentOutOfRangeException(nameof(reader)); @@ -39,7 +39,7 @@ public sealed class ListFilesResponseObjectNullableJsonConverter : global::Syste /// public override void Write( global::System.Text.Json.Utf8JsonWriter writer, - global::G.ListFilesResponseObject? value, + global::G.ChatCompletionModalitie? value, global::System.Text.Json.JsonSerializerOptions options) { writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); @@ -50,7 +50,7 @@ public override void Write( } else { - writer.WriteStringValue(global::G.ListFilesResponseObjectExtensions.ToValueString(value.Value)); + writer.WriteStringValue(global::G.ChatCompletionModalitieExtensions.ToValueString(value.Value)); } } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs new file mode 100644 index 0000000000..add5f68818 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormat.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ChatCompletionRequestMessageContentPartAudioInputAudioFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullable.g.verified.cs new file mode 100644 index 0000000000..4ab48581da --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormat? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.ChatCompletionRequestMessageContentPartAudioInputAudioFormatExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs new file mode 100644 index 0000000000..1af4a507a1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.ChatCompletionRequestMessageContentPartAudioType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ChatCompletionRequestMessageContentPartAudioTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ChatCompletionRequestMessageContentPartAudioType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ChatCompletionRequestMessageContentPartAudioTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ChatCompletionRequestMessageContentPartAudioType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ChatCompletionRequestMessageContentPartAudioType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.ChatCompletionRequestMessageContentPartAudioTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullable.g.verified.cs new file mode 100644 index 0000000000..3d0108b4ea --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ChatCompletionRequestMessageContentPartAudioTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ChatCompletionRequestMessageContentPartAudioType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ChatCompletionRequestMessageContentPartAudioTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ChatCompletionRequestMessageContentPartAudioType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ChatCompletionRequestMessageContentPartAudioType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.ChatCompletionRequestMessageContentPartAudioTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestUserMessageContentPart.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestUserMessageContentPart.g.verified.cs index e68427c391..20f9606328 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestUserMessageContentPart.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ChatCompletionRequestUserMessageContentPart.g.verified.cs @@ -36,11 +36,19 @@ public class ChatCompletionRequestUserMessageContentPartJsonConverter : global:: throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.ChatCompletionRequestMessageContentPartImage)}"); imageUrl = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } + global::G.ChatCompletionRequestMessageContentPartAudio? 
inputAudio = default; + if (discriminator?.Type == global::G.ChatCompletionRequestUserMessageContentPartDiscriminatorType.InputAudio) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.ChatCompletionRequestMessageContentPartAudio), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.ChatCompletionRequestMessageContentPartAudio)}"); + inputAudio = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } var result = new global::G.ChatCompletionRequestUserMessageContentPart( discriminator?.Type, text, - imageUrl + imageUrl, + inputAudio ); return result; @@ -67,6 +75,12 @@ public override void Write( throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.ChatCompletionRequestMessageContentPartImage).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.ImageUrl, typeInfo); } + else if (value.IsInputAudio) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.ChatCompletionRequestMessageContentPartAudio), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.ChatCompletionRequestMessageContentPartAudio).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.InputAudio, typeInfo); + } } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ContentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ContentItem.g.verified.cs index b77588da15..0af3d4d631 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ContentItem.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ContentItem.g.verified.cs @@ -18,45 +18,45 @@ public class ContentItemJsonConverter : global::System.Text.Json.Serialization.J var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageObjectContentItemDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageObjectContentItemDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaObjectDeltaContentItemDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaObjectDeltaContentItemDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); - global::G.MessageContentImageFileObject? imageFile = default; - if (discriminator?.Type == global::G.MessageObjectContentItemDiscriminatorType.ImageFile) + global::G.MessageDeltaContentImageFileObject? imageFile = default; + if (discriminator?.Type == global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType.ImageFile) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentImageFileObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
- throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageContentImageFileObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentImageFileObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaContentImageFileObject)}"); imageFile = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.MessageContentImageUrlObject? imageUrl = default; - if (discriminator?.Type == global::G.MessageObjectContentItemDiscriminatorType.ImageUrl) + global::G.MessageDeltaContentTextObject? text = default; + if (discriminator?.Type == global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType.Text) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentImageUrlObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageContentImageUrlObject)}"); - imageUrl = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); - } - global::G.MessageContentTextObject? text = default; - if (discriminator?.Type == global::G.MessageObjectContentItemDiscriminatorType.Text) - { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentTextObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageContentTextObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentTextObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaContentTextObject)}"); text = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.MessageContentRefusalObject? refusal = default; - if (discriminator?.Type == global::G.MessageObjectContentItemDiscriminatorType.Refusal) + global::G.MessageDeltaContentRefusalObject? refusal = default; + if (discriminator?.Type == global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType.Refusal) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentRefusalObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageContentRefusalObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentRefusalObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaContentRefusalObject)}"); refusal = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } + global::G.MessageDeltaContentImageUrlObject? imageUrl = default; + if (discriminator?.Type == global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType.ImageUrl) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentImageUrlObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaContentImageUrlObject)}"); + imageUrl = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } var result = new global::G.ContentItem( discriminator?.Type, imageFile, - imageUrl, text, - refusal + refusal, + imageUrl ); return result; @@ -73,28 +73,28 @@ public override void Write( if (value.IsImageFile) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentImageFileObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageContentImageFileObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentImageFileObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageDeltaContentImageFileObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.ImageFile, typeInfo); } - else if (value.IsImageUrl) - { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentImageUrlObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageContentImageUrlObject).Name}"); - global::System.Text.Json.JsonSerializer.Serialize(writer, value.ImageUrl, typeInfo); - } else if (value.IsText) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentTextObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageContentTextObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentTextObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageDeltaContentTextObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.Text, typeInfo); } else if (value.IsRefusal) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentRefusalObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageContentRefusalObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentRefusalObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageDeltaContentRefusalObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.Refusal, typeInfo); } + else if (value.IsImageUrl) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentImageUrlObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageDeltaContentImageUrlObject).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.ImageUrl, typeInfo); + } } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ContentItem2.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ContentItem2.g.verified.cs index 0d50944f9b..f1e0fb84ce 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ContentItem2.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ContentItem2.g.verified.cs @@ -18,45 +18,45 @@ public class ContentItem2JsonConverter : global::System.Text.Json.Serialization. var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaObjectDeltaContentItemDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaObjectDeltaContentItemDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageObjectContentItemDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageObjectContentItemDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); - global::G.MessageDeltaContentImageFileObject? imageFile = default; - if (discriminator?.Type == global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType.ImageFile) + global::G.MessageContentImageFileObject? imageFile = default; + if (discriminator?.Type == global::G.MessageObjectContentItemDiscriminatorType.ImageFile) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentImageFileObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaContentImageFileObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentImageFileObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageContentImageFileObject)}"); imageFile = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.MessageDeltaContentTextObject? text = default; - if (discriminator?.Type == global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType.Text) + global::G.MessageContentImageUrlObject? imageUrl = default; + if (discriminator?.Type == global::G.MessageObjectContentItemDiscriminatorType.ImageUrl) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentTextObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
- throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaContentTextObject)}"); - text = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentImageUrlObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageContentImageUrlObject)}"); + imageUrl = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.MessageDeltaContentRefusalObject? refusal = default; - if (discriminator?.Type == global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType.Refusal) + global::G.MessageContentTextObject? text = default; + if (discriminator?.Type == global::G.MessageObjectContentItemDiscriminatorType.Text) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentRefusalObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaContentRefusalObject)}"); - refusal = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentTextObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageContentTextObject)}"); + text = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.MessageDeltaContentImageUrlObject? imageUrl = default; - if (discriminator?.Type == global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType.ImageUrl) + global::G.MessageContentRefusalObject? refusal = default; + if (discriminator?.Type == global::G.MessageObjectContentItemDiscriminatorType.Refusal) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentImageUrlObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageDeltaContentImageUrlObject)}"); - imageUrl = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentRefusalObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageContentRefusalObject)}"); + refusal = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } var result = new global::G.ContentItem2( discriminator?.Type, imageFile, + imageUrl, text, - refusal, - imageUrl + refusal ); return result; @@ -73,28 +73,28 @@ public override void Write( if (value.IsImageFile) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentImageFileObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageDeltaContentImageFileObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentImageFileObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageContentImageFileObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.ImageFile, typeInfo); } + else if (value.IsImageUrl) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentImageUrlObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageContentImageUrlObject).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.ImageUrl, typeInfo); + } else if (value.IsText) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentTextObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageDeltaContentTextObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentTextObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageContentTextObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.Text, typeInfo); } else if (value.IsRefusal) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentRefusalObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageDeltaContentRefusalObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageContentRefusalObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageContentRefusalObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.Refusal, typeInfo); } - else if (value.IsImageUrl) - { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageDeltaContentImageUrlObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
- throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.MessageDeltaContentImageUrlObject).Name}"); - global::System.Text.Json.JsonSerializer.Serialize(writer, value.ImageUrl, typeInfo); - } } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CostsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CostsResultObject.g.verified.cs new file mode 100644 index 0000000000..d636f890e3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CostsResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CostsResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CostsResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CostsResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CostsResultObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CostsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CostsResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CostsResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CostsResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CostsResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..6d8dba7fd9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CostsResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CostsResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CostsResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CostsResultObject? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CostsResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CostsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CostsResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CostsResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioFormat.g.verified.cs new file mode 100644 index 0000000000..af767ae29f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioFormat.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateChatCompletionRequestAudioFormat.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestAudioFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateChatCompletionRequestAudioFormat Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateChatCompletionRequestAudioFormatExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateChatCompletionRequestAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateChatCompletionRequestAudioFormat value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateChatCompletionRequestAudioFormatExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioFormatNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioFormatNullable.g.verified.cs new file mode 100644 index 0000000000..66914fd8c1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioFormatNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateChatCompletionRequestAudioFormatNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestAudioFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateChatCompletionRequestAudioFormat? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateChatCompletionRequestAudioFormatExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateChatCompletionRequestAudioFormat)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateChatCompletionRequestAudioFormat? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateChatCompletionRequestAudioFormatExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioVoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioVoice.g.verified.cs new file mode 100644 index 0000000000..61069b6550 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioVoice.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateChatCompletionRequestAudioVoice.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestAudioVoiceJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateChatCompletionRequestAudioVoice Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateChatCompletionRequestAudioVoiceExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateChatCompletionRequestAudioVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateChatCompletionRequestAudioVoice value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateChatCompletionRequestAudioVoiceExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioVoiceNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioVoiceNullable.g.verified.cs new file mode 100644 index 0000000000..2f2e268424 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateChatCompletionRequestAudioVoiceNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateChatCompletionRequestAudioVoiceNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateChatCompletionRequestAudioVoiceNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateChatCompletionRequestAudioVoice? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateChatCompletionRequestAudioVoiceExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateChatCompletionRequestAudioVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateChatCompletionRequestAudioVoice? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateChatCompletionRequestAudioVoiceExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs new file mode 100644 index 0000000000..608fe302b3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationRequestInputVariant3ItemDiscriminatorTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationRequestInputVariant3ItemDiscriminatorTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationRequestInputVariant3ItemDiscriminatorTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorTypeNullable.g.verified.cs new file mode 100644 index 0000000000..0f5061de6a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationRequestInputVariant3ItemDiscriminatorTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationRequestInputVariant3ItemDiscriminatorTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationRequestInputVariant3ItemDiscriminatorTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs new file mode 100644 index 0000000000..5a4cb08f43 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant1Type.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationRequestInputVariant3ItemVariant1Type.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationRequestInputVariant3ItemVariant1TypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationRequestInputVariant3ItemVariant1Type Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationRequestInputVariant3ItemVariant1TypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationRequestInputVariant3ItemVariant1Type)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationRequestInputVariant3ItemVariant1Type value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationRequestInputVariant3ItemVariant1TypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeNullable.g.verified.cs new file mode 100644 index 0000000000..781f96d2ee --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationRequestInputVariant3ItemVariant1TypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationRequestInputVariant3ItemVariant1Type? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationRequestInputVariant3ItemVariant1TypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationRequestInputVariant3ItemVariant1Type)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationRequestInputVariant3ItemVariant1Type? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationRequestInputVariant3ItemVariant1TypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs new file mode 100644 index 0000000000..e72caa8c45 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant2Type.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationRequestInputVariant3ItemVariant2Type.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationRequestInputVariant3ItemVariant2TypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationRequestInputVariant3ItemVariant2Type Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationRequestInputVariant3ItemVariant2TypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationRequestInputVariant3ItemVariant2Type)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationRequestInputVariant3ItemVariant2Type value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationRequestInputVariant3ItemVariant2TypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant2TypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant2TypeNullable.g.verified.cs new file mode 100644 index 0000000000..a8a8e409fa --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationRequestInputVariant3ItemVariant2TypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationRequestInputVariant3ItemVariant2TypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationRequestInputVariant3ItemVariant2TypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationRequestInputVariant3ItemVariant2Type? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationRequestInputVariant3ItemVariant2TypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationRequestInputVariant3ItemVariant2Type)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationRequestInputVariant3ItemVariant2Type? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationRequestInputVariant3ItemVariant2TypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs new file mode 100644 index 0000000000..0a1abf8f3c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemNullable.g.verified.cs new file mode 100644 index 0000000000..c7e72192a8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs new file mode 100644 index 0000000000..333f23e160 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemNullable.g.verified.cs new file mode 100644 index 0000000000..d92f830fe2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs new file mode 100644 index 0000000000..7e8bd50db6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesHateItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItemNullable.g.verified.cs new file mode 100644 index 0000000000..0c430f47ea --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesHateItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs new file mode 100644 index 0000000000..03e38cdf2b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemNullable.g.verified.cs new file mode 100644 index 0000000000..6e0788196b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs new file mode 100644 index 0000000000..828696d991 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemNullable.g.verified.cs new file mode 100644 index 0000000000..01d0cf8ddb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs new file mode 100644 index 0000000000..0813685261 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemNullable.g.verified.cs new file mode 100644 index 0000000000..2ec8f66142 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs new file mode 100644 index 0000000000..947f56bcf4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionNullable.g.verified.cs new file mode 100644 index 0000000000..27a4d0d5e1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs new file mode 100644 index 0000000000..e893bcc5b2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemNullable.g.verified.cs new file mode 100644 index 0000000000..da60a7fd24 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs new file mode 100644 index 0000000000..f29a90f029 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemNullable.g.verified.cs new file mode 100644 index 0000000000..3b93d859ca --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs new file mode 100644 index 0000000000..0763555c79 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSexualItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemNullable.g.verified.cs new file mode 100644 index 0000000000..0f2b2b5ec0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSexualItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs new file mode 100644 index 0000000000..d9618d7e40 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorNullable.g.verified.cs new file mode 100644 index 0000000000..52e44179f0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs new file mode 100644 index 0000000000..56eb55aba5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemNullable.g.verified.cs new file mode 100644 index 0000000000..7d9a965dc3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs new file mode 100644 index 0000000000..3c196d8f49 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemNullable.g.verified.cs new file mode 100644 index 0000000000..ba3c713bdf --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateRunIncludeItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateRunIncludeItem.g.verified.cs new file mode 100644 index 0000000000..d26c7e2140 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateRunIncludeItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.CreateRunIncludeItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateRunIncludeItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateRunIncludeItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateRunIncludeItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateRunIncludeItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateRunIncludeItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.CreateRunIncludeItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateRunIncludeItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateRunIncludeItemNullable.g.verified.cs new file mode 100644 index 0000000000..f554e8cb6a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateRunIncludeItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.CreateRunIncludeItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class CreateRunIncludeItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.CreateRunIncludeItem? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.CreateRunIncludeItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.CreateRunIncludeItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.CreateRunIncludeItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.CreateRunIncludeItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.FileSearchRankingOptionsRanker.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.FileSearchRankingOptionsRanker.g.verified.cs new file mode 100644 index 0000000000..cf9c788966 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.FileSearchRankingOptionsRanker.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.FileSearchRankingOptionsRanker.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class FileSearchRankingOptionsRankerJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.FileSearchRankingOptionsRanker Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.FileSearchRankingOptionsRankerExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.FileSearchRankingOptionsRanker)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.FileSearchRankingOptionsRanker value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.FileSearchRankingOptionsRankerExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.FileSearchRankingOptionsRankerNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.FileSearchRankingOptionsRankerNullable.g.verified.cs new file mode 100644 index 0000000000..786a1be500 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.FileSearchRankingOptionsRankerNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.FileSearchRankingOptionsRankerNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class FileSearchRankingOptionsRankerNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.FileSearchRankingOptionsRanker? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.FileSearchRankingOptionsRankerExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.FileSearchRankingOptionsRanker)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.FileSearchRankingOptionsRanker? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.FileSearchRankingOptionsRankerExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.GetRunStepIncludeItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.GetRunStepIncludeItem.g.verified.cs new file mode 100644 index 0000000000..586fe52def --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.GetRunStepIncludeItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.GetRunStepIncludeItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class GetRunStepIncludeItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.GetRunStepIncludeItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.GetRunStepIncludeItemExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.GetRunStepIncludeItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.GetRunStepIncludeItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.GetRunStepIncludeItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.GetRunStepIncludeItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.GetRunStepIncludeItemNullable.g.verified.cs new file mode 100644 index 0000000000..4affa2e365 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.GetRunStepIncludeItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.GetRunStepIncludeItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class GetRunStepIncludeItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.GetRunStepIncludeItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.GetRunStepIncludeItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.GetRunStepIncludeItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.GetRunStepIncludeItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.GetRunStepIncludeItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.InputVariant3Item.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.InputVariant3Item.g.verified.cs new file mode 100644 index 0000000000..2775b750bc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.InputVariant3Item.g.verified.cs @@ -0,0 +1,72 @@ +//HintName: JsonConverters.InputVariant3Item.g.cs +#nullable enable +#pragma warning disable CS0618 // Type or member is obsolete + +namespace G.JsonConverters +{ + /// + public class InputVariant3ItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.InputVariant3Item Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + options = options ?? 
throw new global::System.ArgumentNullException(nameof(options)); + var typeInfoResolver = options.TypeInfoResolver ?? throw new global::System.InvalidOperationException("TypeInfoResolver is not set."); + + + var readerCopy = reader; + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateModerationRequestInputVariant3ItemDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateModerationRequestInputVariant3ItemDiscriminator)}"); + var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); + + global::G.CreateModerationRequestInputVariant3ItemVariant1? imageUrl = default; + if (discriminator?.Type == global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType.ImageUrl) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateModerationRequestInputVariant3ItemVariant1)}"); + imageUrl = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.CreateModerationRequestInputVariant3ItemVariant2? text = default; + if (discriminator?.Type == global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType.Text) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateModerationRequestInputVariant3ItemVariant2), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateModerationRequestInputVariant3ItemVariant2)}"); + text = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + + var result = new global::G.InputVariant3Item( + discriminator?.Type, + imageUrl, + text + ); + + return result; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.InputVariant3Item value, + global::System.Text.Json.JsonSerializerOptions options) + { + options = options ?? throw new global::System.ArgumentNullException(nameof(options)); + var typeInfoResolver = options.TypeInfoResolver ?? throw new global::System.InvalidOperationException("TypeInfoResolver is not set."); + + if (value.IsImageUrl) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.CreateModerationRequestInputVariant3ItemVariant1).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.ImageUrl, typeInfo); + } + else if (value.IsText) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateModerationRequestInputVariant3ItemVariant2), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.CreateModerationRequestInputVariant3ItemVariant2).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.Text, typeInfo); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesOrder.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesOrder.g.verified.cs new file mode 100644 index 0000000000..413e4fa34c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesOrder.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.ListFilesOrder.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ListFilesOrderJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ListFilesOrder Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ListFilesOrderExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ListFilesOrder)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ListFilesOrder value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.ListFilesOrderExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesOrderNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesOrderNullable.g.verified.cs new file mode 100644 index 0000000000..6ecc1750da --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesOrderNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.ListFilesOrderNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ListFilesOrderNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ListFilesOrder? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ListFilesOrderExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ListFilesOrder)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ListFilesOrder? 
value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.ListFilesOrderExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListRunStepsIncludeItem.g.verified.cs similarity index 73% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesResponseObject.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListRunStepsIncludeItem.g.verified.cs index 7b979af718..2bfadf551c 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListFilesResponseObject.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListRunStepsIncludeItem.g.verified.cs @@ -1,13 +1,13 @@ -//HintName: JsonConverters.ListFilesResponseObject.g.cs +//HintName: JsonConverters.ListRunStepsIncludeItem.g.cs #nullable enable namespace G.JsonConverters { /// - public sealed class ListFilesResponseObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + public sealed class ListRunStepsIncludeItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter { /// - public override global::G.ListFilesResponseObject Read( + public override global::G.ListRunStepsIncludeItem Read( ref global::System.Text.Json.Utf8JsonReader reader, global::System.Type typeToConvert, global::System.Text.Json.JsonSerializerOptions options) @@ -19,7 +19,7 @@ public sealed class ListFilesResponseObjectJsonConverter : global::System.Text.J var stringValue = reader.GetString(); if (stringValue != null) { - return global::G.ListFilesResponseObjectExtensions.ToEnum(stringValue) ?? default; + return global::G.ListRunStepsIncludeItemExtensions.ToEnum(stringValue) ?? default; } break; @@ -27,7 +27,7 @@ public sealed class ListFilesResponseObjectJsonConverter : global::System.Text.J case global::System.Text.Json.JsonTokenType.Number: { var numValue = reader.GetInt32(); - return (global::G.ListFilesResponseObject)numValue; + return (global::G.ListRunStepsIncludeItem)numValue; } default: throw new global::System.ArgumentOutOfRangeException(nameof(reader)); @@ -39,12 +39,12 @@ public sealed class ListFilesResponseObjectJsonConverter : global::System.Text.J /// public override void Write( global::System.Text.Json.Utf8JsonWriter writer, - global::G.ListFilesResponseObject value, + global::G.ListRunStepsIncludeItem value, global::System.Text.Json.JsonSerializerOptions options) { writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); - writer.WriteStringValue(global::G.ListFilesResponseObjectExtensions.ToValueString(value)); + writer.WriteStringValue(global::G.ListRunStepsIncludeItemExtensions.ToValueString(value)); } } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListRunStepsIncludeItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListRunStepsIncludeItemNullable.g.verified.cs new file mode 100644 index 0000000000..b486ceff6e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ListRunStepsIncludeItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.ListRunStepsIncludeItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ListRunStepsIncludeItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ListRunStepsIncludeItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ListRunStepsIncludeItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ListRunStepsIncludeItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ListRunStepsIncludeItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.ListRunStepsIncludeItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.OneOf3.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.OneOf3.g.verified.cs new file mode 100644 index 0000000000..864e847ed1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.OneOf3.g.verified.cs @@ -0,0 +1,112 @@ +//HintName: JsonConverters.OneOf3.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public class OneOfJsonConverter : global::System.Text.Json.Serialization.JsonConverter> + { + /// + public override global::G.OneOf Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + options = options ?? throw new global::System.ArgumentNullException(nameof(options)); + var typeInfoResolver = options.TypeInfoResolver ?? throw new global::System.InvalidOperationException("TypeInfoResolver is not set."); + + var + readerCopy = reader; + T1? value1 = default; + try + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T1), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T1).Name}"); + value1 = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, typeInfo); + } + catch (global::System.Text.Json.JsonException) + { + } + + readerCopy = reader; + T2? value2 = default; + try + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T2), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T2).Name}"); + value2 = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, typeInfo); + } + catch (global::System.Text.Json.JsonException) + { + } + + readerCopy = reader; + T3? value3 = default; + try + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T3), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T3).Name}"); + value3 = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, typeInfo); + } + catch (global::System.Text.Json.JsonException) + { + } + + var result = new global::G.OneOf( + value1, + value2, + value3 + ); + + if (value1 != null) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T1), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T1).Name}"); + _ = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + else if (value2 != null) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T2), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T2).Name}"); + _ = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + else if (value3 != null) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T3), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T3).Name}"); + _ = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + + return result; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.OneOf value, + global::System.Text.Json.JsonSerializerOptions options) + { + options = options ?? throw new global::System.ArgumentNullException(nameof(options)); + var typeInfoResolver = options.TypeInfoResolver ?? throw new global::System.InvalidOperationException("TypeInfoResolver is not set."); + + if (value.IsValue1) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T1), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T1).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.Value1, typeInfo); + } + else if (value.IsValue2) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T2), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T2).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.Value2, typeInfo); + } + else if (value.IsValue3) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(T3), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(T3).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.Value3, typeInfo); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.PredictionContentType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.PredictionContentType.g.verified.cs new file mode 100644 index 0000000000..a6b5ebdb6d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.PredictionContentType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.PredictionContentType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class PredictionContentTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.PredictionContentType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.PredictionContentTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.PredictionContentType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.PredictionContentType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.PredictionContentTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.PredictionContentTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.PredictionContentTypeNullable.g.verified.cs new file mode 100644 index 0000000000..c01a6e3c95 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.PredictionContentTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.PredictionContentTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class PredictionContentTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.PredictionContentType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.PredictionContentTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.PredictionContentType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.PredictionContentType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.PredictionContentTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitListResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitListResponseObject.g.verified.cs new file mode 100644 index 0000000000..a311f57809 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitListResponseObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.ProjectRateLimitListResponseObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ProjectRateLimitListResponseObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ProjectRateLimitListResponseObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ProjectRateLimitListResponseObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ProjectRateLimitListResponseObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ProjectRateLimitListResponseObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.ProjectRateLimitListResponseObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitListResponseObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitListResponseObjectNullable.g.verified.cs new file mode 100644 index 0000000000..1a0778f4b0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitListResponseObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.ProjectRateLimitListResponseObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ProjectRateLimitListResponseObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ProjectRateLimitListResponseObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ProjectRateLimitListResponseObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ProjectRateLimitListResponseObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ProjectRateLimitListResponseObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.ProjectRateLimitListResponseObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitObject.g.verified.cs new file mode 100644 index 0000000000..73acf3db3c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.ProjectRateLimitObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ProjectRateLimitObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ProjectRateLimitObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ProjectRateLimitObjectExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ProjectRateLimitObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ProjectRateLimitObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.ProjectRateLimitObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitObjectNullable.g.verified.cs new file mode 100644 index 0000000000..f1e8f4aeec --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ProjectRateLimitObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.ProjectRateLimitObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class ProjectRateLimitObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ProjectRateLimitObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.ProjectRateLimitObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.ProjectRateLimitObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ProjectRateLimitObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.ProjectRateLimitObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemCreateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemCreateType.g.verified.cs new file mode 100644 index 0000000000..553ecbf2e5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemCreateType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventConversationItemCreateType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventConversationItemCreateTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventConversationItemCreateType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventConversationItemCreateTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventConversationItemCreateType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventConversationItemCreateType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventConversationItemCreateTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemCreateTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemCreateTypeNullable.g.verified.cs new file mode 100644 index 0000000000..649a8cb5d0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemCreateTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventConversationItemCreateTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventConversationItemCreateTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventConversationItemCreateType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventConversationItemCreateTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventConversationItemCreateType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventConversationItemCreateType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventConversationItemCreateTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemDeleteType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemDeleteType.g.verified.cs new file mode 100644 index 0000000000..71b629067f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemDeleteType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventConversationItemDeleteType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventConversationItemDeleteTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventConversationItemDeleteType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventConversationItemDeleteTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventConversationItemDeleteType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventConversationItemDeleteType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventConversationItemDeleteTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemDeleteTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemDeleteTypeNullable.g.verified.cs new file mode 100644 index 0000000000..1e2d725ecd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemDeleteTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventConversationItemDeleteTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventConversationItemDeleteTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventConversationItemDeleteType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventConversationItemDeleteTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventConversationItemDeleteType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventConversationItemDeleteType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventConversationItemDeleteTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemTruncateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemTruncateType.g.verified.cs new file mode 100644 index 0000000000..fe6aa2071f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemTruncateType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventConversationItemTruncateType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventConversationItemTruncateTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventConversationItemTruncateType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventConversationItemTruncateTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventConversationItemTruncateType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventConversationItemTruncateType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventConversationItemTruncateTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemTruncateTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemTruncateTypeNullable.g.verified.cs new file mode 100644 index 0000000000..cf993c841a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventConversationItemTruncateTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventConversationItemTruncateTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventConversationItemTruncateTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventConversationItemTruncateType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventConversationItemTruncateTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventConversationItemTruncateType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventConversationItemTruncateType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventConversationItemTruncateTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs new file mode 100644 index 0000000000..f9998e09e0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferAppendType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventInputAudioBufferAppendType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventInputAudioBufferAppendTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventInputAudioBufferAppendType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventInputAudioBufferAppendTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventInputAudioBufferAppendType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventInputAudioBufferAppendType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventInputAudioBufferAppendTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferAppendTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferAppendTypeNullable.g.verified.cs new file mode 100644 index 0000000000..51a5a6ea49 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferAppendTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventInputAudioBufferAppendTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventInputAudioBufferAppendTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventInputAudioBufferAppendType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventInputAudioBufferAppendTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventInputAudioBufferAppendType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventInputAudioBufferAppendType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventInputAudioBufferAppendTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferClearType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferClearType.g.verified.cs new file mode 100644 index 0000000000..f86a30dbc8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferClearType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventInputAudioBufferClearType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventInputAudioBufferClearTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventInputAudioBufferClearType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventInputAudioBufferClearTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventInputAudioBufferClearType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventInputAudioBufferClearType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventInputAudioBufferClearTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferClearTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferClearTypeNullable.g.verified.cs new file mode 100644 index 0000000000..5f5f6cf86a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferClearTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventInputAudioBufferClearTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventInputAudioBufferClearTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventInputAudioBufferClearType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventInputAudioBufferClearTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventInputAudioBufferClearType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventInputAudioBufferClearType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventInputAudioBufferClearTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs new file mode 100644 index 0000000000..07d8ce4b83 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferCommitType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventInputAudioBufferCommitType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventInputAudioBufferCommitTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventInputAudioBufferCommitType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventInputAudioBufferCommitTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventInputAudioBufferCommitType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventInputAudioBufferCommitType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventInputAudioBufferCommitTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferCommitTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferCommitTypeNullable.g.verified.cs new file mode 100644 index 0000000000..5f20965b3c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventInputAudioBufferCommitTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventInputAudioBufferCommitTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventInputAudioBufferCommitTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventInputAudioBufferCommitType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventInputAudioBufferCommitTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventInputAudioBufferCommitType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventInputAudioBufferCommitType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventInputAudioBufferCommitTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCancelType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCancelType.g.verified.cs new file mode 100644 index 0000000000..8318eda0d6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCancelType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventResponseCancelType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventResponseCancelTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventResponseCancelType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventResponseCancelTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventResponseCancelType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventResponseCancelType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventResponseCancelTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCancelTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCancelTypeNullable.g.verified.cs new file mode 100644 index 0000000000..5039069d2d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCancelTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventResponseCancelTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventResponseCancelTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventResponseCancelType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventResponseCancelTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventResponseCancelType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventResponseCancelType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventResponseCancelTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCreateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCreateType.g.verified.cs new file mode 100644 index 0000000000..0465ba6afd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCreateType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventResponseCreateType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventResponseCreateTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventResponseCreateType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventResponseCreateTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventResponseCreateType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventResponseCreateType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventResponseCreateTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCreateTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCreateTypeNullable.g.verified.cs new file mode 100644 index 0000000000..224900c3e8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventResponseCreateTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventResponseCreateTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventResponseCreateTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventResponseCreateType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventResponseCreateTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventResponseCreateType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventResponseCreateType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventResponseCreateTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventSessionUpdateType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventSessionUpdateType.g.verified.cs new file mode 100644 index 0000000000..bfe9f26a84 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventSessionUpdateType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeClientEventSessionUpdateType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventSessionUpdateTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventSessionUpdateType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventSessionUpdateTypeExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventSessionUpdateType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventSessionUpdateType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeClientEventSessionUpdateTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventSessionUpdateTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventSessionUpdateTypeNullable.g.verified.cs new file mode 100644 index 0000000000..6d4b572f0b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeClientEventSessionUpdateTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeClientEventSessionUpdateTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeClientEventSessionUpdateTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeClientEventSessionUpdateType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeClientEventSessionUpdateTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeClientEventSessionUpdateType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeClientEventSessionUpdateType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeClientEventSessionUpdateTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemContentItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemContentItemType.g.verified.cs new file mode 100644 index 0000000000..50d54c1b78 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemContentItemType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeConversationItemContentItemType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemContentItemTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemContentItemType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemContentItemTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemContentItemType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemContentItemType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeConversationItemContentItemTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemContentItemTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemContentItemTypeNullable.g.verified.cs new file mode 100644 index 0000000000..3acada68ce --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemContentItemTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeConversationItemContentItemTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemContentItemTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemContentItemType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemContentItemTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemContentItemType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemContentItemType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeConversationItemContentItemTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemObject.g.verified.cs new file mode 100644 index 0000000000..f101c24b9e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeConversationItemObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeConversationItemObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemObjectNullable.g.verified.cs new file mode 100644 index 0000000000..34c91665a2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeConversationItemObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeConversationItemObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemRole.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemRole.g.verified.cs new file mode 100644 index 0000000000..412c700a83 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemRole.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeConversationItemRole.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemRoleJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemRole Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemRoleExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemRole)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemRole value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeConversationItemRoleExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemRoleNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemRoleNullable.g.verified.cs new file mode 100644 index 0000000000..57c848a539 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemRoleNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeConversationItemRoleNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemRoleNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemRole? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemRoleExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemRole)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemRole? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeConversationItemRoleExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemStatus.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemStatus.g.verified.cs new file mode 100644 index 0000000000..c9822fa396 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemStatus.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeConversationItemStatus.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemStatusJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemStatus Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemStatusExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemStatus)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemStatus value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeConversationItemStatusExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemStatusNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemStatusNullable.g.verified.cs new file mode 100644 index 0000000000..e608fae32e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemStatusNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeConversationItemStatusNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemStatusNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemStatus? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemStatusExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemStatus)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemStatus? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeConversationItemStatusExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemType.g.verified.cs new file mode 100644 index 0000000000..2a1ad59afb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeConversationItemType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeConversationItemTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemTypeNullable.g.verified.cs new file mode 100644 index 0000000000..25b204945c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeConversationItemTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeConversationItemTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeConversationItemTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeConversationItemType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeConversationItemTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeConversationItemType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeConversationItemType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeConversationItemTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseObject.g.verified.cs new file mode 100644 index 0000000000..1264217f83 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeResponseObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeResponseObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeResponseObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeResponseObjectExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeResponseObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeResponseObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeResponseObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseObjectNullable.g.verified.cs new file mode 100644 index 0000000000..a8d20efac5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeResponseObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeResponseObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeResponseObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeResponseObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeResponseObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeResponseObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeResponseObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatus.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatus.g.verified.cs new file mode 100644 index 0000000000..35890f9598 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatus.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeResponseStatus.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeResponseStatusJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeResponseStatus Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeResponseStatusExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeResponseStatus)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeResponseStatus value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeResponseStatusExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsReason.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsReason.g.verified.cs new file mode 100644 index 0000000000..4d87191139 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsReason.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeResponseStatusDetailsReason.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeResponseStatusDetailsReasonJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeResponseStatusDetailsReason Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeResponseStatusDetailsReasonExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeResponseStatusDetailsReason)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeResponseStatusDetailsReason value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeResponseStatusDetailsReasonExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsReasonNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsReasonNullable.g.verified.cs new file mode 100644 index 0000000000..e86b96b6ca --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsReasonNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeResponseStatusDetailsReasonNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeResponseStatusDetailsReasonNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeResponseStatusDetailsReason? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeResponseStatusDetailsReasonExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeResponseStatusDetailsReason)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeResponseStatusDetailsReason? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeResponseStatusDetailsReasonExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsType.g.verified.cs new file mode 100644 index 0000000000..d5b50975e1 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeResponseStatusDetailsType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeResponseStatusDetailsTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeResponseStatusDetailsType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeResponseStatusDetailsTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeResponseStatusDetailsType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeResponseStatusDetailsType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeResponseStatusDetailsTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsTypeNullable.g.verified.cs new file mode 100644 index 0000000000..9487ee25ae --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusDetailsTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeResponseStatusDetailsTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeResponseStatusDetailsTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeResponseStatusDetailsType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeResponseStatusDetailsTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeResponseStatusDetailsType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeResponseStatusDetailsType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeResponseStatusDetailsTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusNullable.g.verified.cs new file mode 100644 index 0000000000..09c8155557 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeResponseStatusNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeResponseStatusNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeResponseStatusNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeResponseStatus? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeResponseStatusExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeResponseStatus)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeResponseStatus? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeResponseStatusExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationCreatedType.g.verified.cs new file mode 100644 index 0000000000..95c76f54b9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationCreatedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventConversationCreatedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationCreatedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationCreatedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationCreatedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationCreatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationCreatedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventConversationCreatedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationCreatedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationCreatedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..95cd18a139 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationCreatedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventConversationCreatedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationCreatedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationCreatedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationCreatedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationCreatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationCreatedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventConversationCreatedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemCreatedType.g.verified.cs new file mode 100644 index 0000000000..dd819978db --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemCreatedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemCreatedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemCreatedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemCreatedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemCreatedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemCreatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemCreatedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemCreatedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemCreatedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemCreatedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..26538491e9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemCreatedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemCreatedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemCreatedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemCreatedType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemCreatedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemCreatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemCreatedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemCreatedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemDeletedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemDeletedType.g.verified.cs new file mode 100644 index 0000000000..4daa34b2db --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemDeletedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemDeletedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemDeletedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemDeletedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemDeletedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemDeletedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemDeletedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemDeletedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemDeletedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemDeletedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..223ac86c43 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemDeletedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemDeletedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemDeletedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemDeletedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemDeletedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemDeletedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemDeletedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemDeletedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs new file mode 100644 index 0000000000..2fbc445fbc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..0fede5d9b9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs new file mode 100644 index 0000000000..05606d3057 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..6860945aed --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemTruncatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemTruncatedType.g.verified.cs new file mode 100644 index 0000000000..9de75782a4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemTruncatedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemTruncatedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemTruncatedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemTruncatedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemTruncatedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemTruncatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemTruncatedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemTruncatedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemTruncatedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemTruncatedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..ede1bd4e42 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventConversationItemTruncatedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventConversationItemTruncatedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventConversationItemTruncatedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventConversationItemTruncatedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventConversationItemTruncatedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventConversationItemTruncatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventConversationItemTruncatedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventConversationItemTruncatedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventErrorType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventErrorType.g.verified.cs new file mode 100644 index 0000000000..1ee0d7de38 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventErrorType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventErrorType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventErrorTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventErrorType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventErrorTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventErrorType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventErrorType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventErrorTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventErrorTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventErrorTypeNullable.g.verified.cs new file mode 100644 index 0000000000..045c3cca7c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventErrorTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventErrorTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventErrorTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventErrorType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventErrorTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventErrorType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventErrorType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventErrorTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs new file mode 100644 index 0000000000..b7caa6e785 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferClearedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventInputAudioBufferClearedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventInputAudioBufferClearedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventInputAudioBufferClearedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventInputAudioBufferClearedTypeExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventInputAudioBufferClearedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventInputAudioBufferClearedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventInputAudioBufferClearedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferClearedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferClearedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..56ff4c9455 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferClearedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventInputAudioBufferClearedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventInputAudioBufferClearedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventInputAudioBufferClearedType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventInputAudioBufferClearedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventInputAudioBufferClearedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventInputAudioBufferClearedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventInputAudioBufferClearedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs new file mode 100644 index 0000000000..3e8cc3500a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferCommittedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventInputAudioBufferCommittedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventInputAudioBufferCommittedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventInputAudioBufferCommittedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventInputAudioBufferCommittedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventInputAudioBufferCommittedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventInputAudioBufferCommittedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventInputAudioBufferCommittedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferCommittedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferCommittedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..8aab620062 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferCommittedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventInputAudioBufferCommittedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventInputAudioBufferCommittedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventInputAudioBufferCommittedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventInputAudioBufferCommittedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventInputAudioBufferCommittedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventInputAudioBufferCommittedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventInputAudioBufferCommittedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs new file mode 100644 index 0000000000..ff3af32a3a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventInputAudioBufferSpeechStartedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventInputAudioBufferSpeechStartedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventInputAudioBufferSpeechStartedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventInputAudioBufferSpeechStartedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventInputAudioBufferSpeechStartedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventInputAudioBufferSpeechStartedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..73133840bf --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventInputAudioBufferSpeechStartedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventInputAudioBufferSpeechStartedType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventInputAudioBufferSpeechStartedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventInputAudioBufferSpeechStartedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventInputAudioBufferSpeechStartedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventInputAudioBufferSpeechStartedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs new file mode 100644 index 0000000000..34a8c5f3f6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventInputAudioBufferSpeechStoppedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventInputAudioBufferSpeechStoppedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventInputAudioBufferSpeechStoppedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..1760eb956e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventInputAudioBufferSpeechStoppedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventInputAudioBufferSpeechStoppedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventInputAudioBufferSpeechStoppedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateTranscriptionRequestResponseFormat.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs similarity index 66% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateTranscriptionRequestResponseFormat.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs index 6630149428..bd0a3b7237 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateTranscriptionRequestResponseFormat.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventRateLimitsUpdatedType.g.verified.cs @@ -1,13 +1,13 @@ -//HintName: JsonConverters.CreateTranscriptionRequestResponseFormat.g.cs +//HintName: JsonConverters.RealtimeServerEventRateLimitsUpdatedType.g.cs #nullable enable namespace G.JsonConverters { /// - public sealed class CreateTranscriptionRequestResponseFormatJsonConverter : global::System.Text.Json.Serialization.JsonConverter + public sealed class RealtimeServerEventRateLimitsUpdatedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter { /// - public override global::G.CreateTranscriptionRequestResponseFormat Read( + public override global::G.RealtimeServerEventRateLimitsUpdatedType Read( ref global::System.Text.Json.Utf8JsonReader reader, global::System.Type typeToConvert, global::System.Text.Json.JsonSerializerOptions options) @@ -19,7 +19,7 @@ public sealed class CreateTranscriptionRequestResponseFormatJsonConverter : glob var stringValue = reader.GetString(); if (stringValue != null) { - return global::G.CreateTranscriptionRequestResponseFormatExtensions.ToEnum(stringValue) ?? default; + return global::G.RealtimeServerEventRateLimitsUpdatedTypeExtensions.ToEnum(stringValue) ?? 
default; } break; @@ -27,7 +27,7 @@ public sealed class CreateTranscriptionRequestResponseFormatJsonConverter : glob case global::System.Text.Json.JsonTokenType.Number: { var numValue = reader.GetInt32(); - return (global::G.CreateTranscriptionRequestResponseFormat)numValue; + return (global::G.RealtimeServerEventRateLimitsUpdatedType)numValue; } default: throw new global::System.ArgumentOutOfRangeException(nameof(reader)); @@ -39,12 +39,12 @@ public sealed class CreateTranscriptionRequestResponseFormatJsonConverter : glob /// public override void Write( global::System.Text.Json.Utf8JsonWriter writer, - global::G.CreateTranscriptionRequestResponseFormat value, + global::G.RealtimeServerEventRateLimitsUpdatedType value, global::System.Text.Json.JsonSerializerOptions options) { writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); - writer.WriteStringValue(global::G.CreateTranscriptionRequestResponseFormatExtensions.ToValueString(value)); + writer.WriteStringValue(global::G.RealtimeServerEventRateLimitsUpdatedTypeExtensions.ToValueString(value)); } } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateTranscriptionRequestResponseFormatNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeNullable.g.verified.cs similarity index 68% rename from src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateTranscriptionRequestResponseFormatNullable.g.verified.cs rename to src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeNullable.g.verified.cs index 8b1ab1c3b1..d6c198e2a1 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.CreateTranscriptionRequestResponseFormatNullable.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeNullable.g.verified.cs @@ -1,13 +1,13 @@ -//HintName: JsonConverters.CreateTranscriptionRequestResponseFormatNullable.g.cs +//HintName: JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeNullable.g.cs #nullable enable namespace G.JsonConverters { /// - public sealed class CreateTranscriptionRequestResponseFormatNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + public sealed class RealtimeServerEventRateLimitsUpdatedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter { /// - public override global::G.CreateTranscriptionRequestResponseFormat? Read( + public override global::G.RealtimeServerEventRateLimitsUpdatedType? 
Read( ref global::System.Text.Json.Utf8JsonReader reader, global::System.Type typeToConvert, global::System.Text.Json.JsonSerializerOptions options) @@ -19,7 +19,7 @@ public sealed class CreateTranscriptionRequestResponseFormatNullableJsonConverte var stringValue = reader.GetString(); if (stringValue != null) { - return global::G.CreateTranscriptionRequestResponseFormatExtensions.ToEnum(stringValue); + return global::G.RealtimeServerEventRateLimitsUpdatedTypeExtensions.ToEnum(stringValue); } break; @@ -27,7 +27,7 @@ public sealed class CreateTranscriptionRequestResponseFormatNullableJsonConverte case global::System.Text.Json.JsonTokenType.Number: { var numValue = reader.GetInt32(); - return (global::G.CreateTranscriptionRequestResponseFormat)numValue; + return (global::G.RealtimeServerEventRateLimitsUpdatedType)numValue; } default: throw new global::System.ArgumentOutOfRangeException(nameof(reader)); @@ -39,7 +39,7 @@ public sealed class CreateTranscriptionRequestResponseFormatNullableJsonConverte /// public override void Write( global::System.Text.Json.Utf8JsonWriter writer, - global::G.CreateTranscriptionRequestResponseFormat? value, + global::G.RealtimeServerEventRateLimitsUpdatedType? value, global::System.Text.Json.JsonSerializerOptions options) { writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); @@ -50,7 +50,7 @@ public override void Write( } else { - writer.WriteStringValue(global::G.CreateTranscriptionRequestResponseFormatExtensions.ToValueString(value.Value)); + writer.WriteStringValue(global::G.RealtimeServerEventRateLimitsUpdatedTypeExtensions.ToValueString(value.Value)); } } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDeltaType.g.verified.cs new file mode 100644 index 0000000000..6b92f3c87e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDeltaType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseAudioDeltaType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseAudioDeltaTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseAudioDeltaType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseAudioDeltaTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseAudioDeltaType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseAudioDeltaType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseAudioDeltaTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDeltaTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDeltaTypeNullable.g.verified.cs new file mode 100644 index 0000000000..1c1dd8b717 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDeltaTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseAudioDeltaTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseAudioDeltaTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseAudioDeltaType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseAudioDeltaTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseAudioDeltaType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseAudioDeltaType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseAudioDeltaTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDoneType.g.verified.cs new file mode 100644 index 0000000000..4c04f5e67a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDoneType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseAudioDoneType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseAudioDoneTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseAudioDoneType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseAudioDoneTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseAudioDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseAudioDoneType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseAudioDoneTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDoneTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDoneTypeNullable.g.verified.cs new file mode 100644 index 0000000000..c114c01ffb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioDoneTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseAudioDoneTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseAudioDoneTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseAudioDoneType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseAudioDoneTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseAudioDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseAudioDoneType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseAudioDoneTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs new file mode 100644 index 0000000000..b99b6b4e2e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseAudioTranscriptDeltaTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseAudioTranscriptDeltaType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseAudioTranscriptDeltaTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseAudioTranscriptDeltaType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseAudioTranscriptDeltaType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseAudioTranscriptDeltaTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaTypeNullable.g.verified.cs new file mode 100644 index 0000000000..07069b10f7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseAudioTranscriptDeltaTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseAudioTranscriptDeltaType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseAudioTranscriptDeltaTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseAudioTranscriptDeltaType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseAudioTranscriptDeltaType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseAudioTranscriptDeltaTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs new file mode 100644 index 0000000000..62b76dbe9b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseAudioTranscriptDoneTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseAudioTranscriptDoneType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseAudioTranscriptDoneTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseAudioTranscriptDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseAudioTranscriptDoneType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseAudioTranscriptDoneTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneTypeNullable.g.verified.cs new file mode 100644 index 0000000000..5488d127df --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseAudioTranscriptDoneTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseAudioTranscriptDoneType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseAudioTranscriptDoneTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseAudioTranscriptDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseAudioTranscriptDoneType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseAudioTranscriptDoneTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs new file mode 100644 index 0000000000..1a30a36b17 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedPartType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseContentPartAddedPartType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseContentPartAddedPartTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseContentPartAddedPartType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseContentPartAddedPartTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseContentPartAddedPartType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseContentPartAddedPartType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseContentPartAddedPartTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedPartTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedPartTypeNullable.g.verified.cs new file mode 100644 index 0000000000..866c07a23b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedPartTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseContentPartAddedPartTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseContentPartAddedPartTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseContentPartAddedPartType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseContentPartAddedPartTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseContentPartAddedPartType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseContentPartAddedPartType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseContentPartAddedPartTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedType.g.verified.cs new file mode 100644 index 0000000000..73da244ca8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseContentPartAddedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseContentPartAddedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseContentPartAddedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseContentPartAddedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseContentPartAddedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseContentPartAddedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseContentPartAddedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..2825a15797 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartAddedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseContentPartAddedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseContentPartAddedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseContentPartAddedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseContentPartAddedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseContentPartAddedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseContentPartAddedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseContentPartAddedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartDoneType.g.verified.cs new file mode 100644 index 0000000000..30dad2c247 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartDoneType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseContentPartDoneType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseContentPartDoneTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseContentPartDoneType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseContentPartDoneTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseContentPartDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseContentPartDoneType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseContentPartDoneTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartDoneTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartDoneTypeNullable.g.verified.cs new file mode 100644 index 0000000000..1e8c7c9594 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseContentPartDoneTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseContentPartDoneTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseContentPartDoneTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseContentPartDoneType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseContentPartDoneTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseContentPartDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseContentPartDoneType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseContentPartDoneTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseCreatedType.g.verified.cs new file mode 100644 index 0000000000..9bc8cdc28c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseCreatedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseCreatedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseCreatedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseCreatedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseCreatedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseCreatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseCreatedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseCreatedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseCreatedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseCreatedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..29c519e57e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseCreatedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseCreatedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseCreatedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseCreatedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseCreatedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseCreatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseCreatedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseCreatedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseDoneType.g.verified.cs new file mode 100644 index 0000000000..227b737d34 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseDoneType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseDoneType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseDoneTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseDoneType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseDoneTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseDoneType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseDoneTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseDoneTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseDoneTypeNullable.g.verified.cs new file mode 100644 index 0000000000..09549677eb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseDoneTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseDoneTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseDoneTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseDoneType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseDoneTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseDoneType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseDoneTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs new file mode 100644 index 0000000000..48cf12b9f5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeNullable.g.verified.cs new file mode 100644 index 0000000000..9448c617ad --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs new file mode 100644 index 0000000000..9afbea503a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseFunctionCallArgumentsDoneTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeNullable.g.verified.cs new file mode 100644 index 0000000000..4f8eeae633 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseFunctionCallArgumentsDoneTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs new file mode 100644 index 0000000000..a1aa31c2a7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemAddedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseOutputItemAddedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseOutputItemAddedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseOutputItemAddedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseOutputItemAddedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseOutputItemAddedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseOutputItemAddedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseOutputItemAddedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemAddedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemAddedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..71f366bf86 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemAddedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseOutputItemAddedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseOutputItemAddedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseOutputItemAddedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseOutputItemAddedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseOutputItemAddedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseOutputItemAddedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseOutputItemAddedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs new file mode 100644 index 0000000000..94840c3f2f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemDoneType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseOutputItemDoneType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseOutputItemDoneTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseOutputItemDoneType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseOutputItemDoneTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseOutputItemDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseOutputItemDoneType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseOutputItemDoneTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemDoneTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemDoneTypeNullable.g.verified.cs new file mode 100644 index 0000000000..d8ad5b8972 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseOutputItemDoneTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseOutputItemDoneTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseOutputItemDoneTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseOutputItemDoneType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseOutputItemDoneTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseOutputItemDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseOutputItemDoneType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseOutputItemDoneTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDeltaType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDeltaType.g.verified.cs new file mode 100644 index 0000000000..284ad9ce95 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDeltaType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseTextDeltaType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseTextDeltaTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseTextDeltaType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseTextDeltaTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseTextDeltaType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseTextDeltaType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseTextDeltaTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDeltaTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDeltaTypeNullable.g.verified.cs new file mode 100644 index 0000000000..62d4a2b620 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDeltaTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseTextDeltaTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseTextDeltaTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseTextDeltaType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseTextDeltaTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseTextDeltaType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseTextDeltaType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseTextDeltaTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDoneType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDoneType.g.verified.cs new file mode 100644 index 0000000000..56e960dd0e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDoneType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventResponseTextDoneType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseTextDoneTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseTextDoneType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseTextDoneTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseTextDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseTextDoneType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventResponseTextDoneTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDoneTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDoneTypeNullable.g.verified.cs new file mode 100644 index 0000000000..b4ceca3c2f --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventResponseTextDoneTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventResponseTextDoneTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventResponseTextDoneTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventResponseTextDoneType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventResponseTextDoneTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventResponseTextDoneType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventResponseTextDoneType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventResponseTextDoneTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionCreatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionCreatedType.g.verified.cs new file mode 100644 index 0000000000..bfeb295c57 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionCreatedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventSessionCreatedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventSessionCreatedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventSessionCreatedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventSessionCreatedTypeExtensions.ToEnum(stringValue) ?? 
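+                        // Unrecognized string values fall back to the enum's default member instead of
+                        // throwing; numeric tokens in the case below are cast to the enum directly.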
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventSessionCreatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventSessionCreatedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventSessionCreatedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionCreatedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionCreatedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..ff17d63ad2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionCreatedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventSessionCreatedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventSessionCreatedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventSessionCreatedType? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventSessionCreatedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventSessionCreatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventSessionCreatedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventSessionCreatedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionUpdatedType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionUpdatedType.g.verified.cs new file mode 100644 index 0000000000..607e7c155a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionUpdatedType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeServerEventSessionUpdatedType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventSessionUpdatedTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventSessionUpdatedType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventSessionUpdatedTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventSessionUpdatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventSessionUpdatedType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeServerEventSessionUpdatedTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionUpdatedTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionUpdatedTypeNullable.g.verified.cs new file mode 100644 index 0000000000..1b7f395edd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeServerEventSessionUpdatedTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeServerEventSessionUpdatedTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeServerEventSessionUpdatedTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeServerEventSessionUpdatedType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeServerEventSessionUpdatedTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeServerEventSessionUpdatedType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeServerEventSessionUpdatedType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeServerEventSessionUpdatedTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionMaxResponseOutputTokens.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionMaxResponseOutputTokens.g.verified.cs new file mode 100644 index 0000000000..9032a303dd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionMaxResponseOutputTokens.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeSessionMaxResponseOutputTokens.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeSessionMaxResponseOutputTokensJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeSessionMaxResponseOutputTokens Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeSessionMaxResponseOutputTokensExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeSessionMaxResponseOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeSessionMaxResponseOutputTokens value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeSessionMaxResponseOutputTokensExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionMaxResponseOutputTokensNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionMaxResponseOutputTokensNullable.g.verified.cs new file mode 100644 index 0000000000..af4a58c8eb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionMaxResponseOutputTokensNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeSessionMaxResponseOutputTokensNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeSessionMaxResponseOutputTokensNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeSessionMaxResponseOutputTokens? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeSessionMaxResponseOutputTokensExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeSessionMaxResponseOutputTokens)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeSessionMaxResponseOutputTokens? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeSessionMaxResponseOutputTokensExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionModalitie.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionModalitie.g.verified.cs new file mode 100644 index 0000000000..cf0202bbe5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionModalitie.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeSessionModalitie.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeSessionModalitieJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeSessionModalitie Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeSessionModalitieExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeSessionModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeSessionModalitie value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeSessionModalitieExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionModalitieNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionModalitieNullable.g.verified.cs new file mode 100644 index 0000000000..518c00de68 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionModalitieNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeSessionModalitieNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeSessionModalitieNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeSessionModalitie? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeSessionModalitieExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeSessionModalitie)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeSessionModalitie? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeSessionModalitieExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionToolType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionToolType.g.verified.cs new file mode 100644 index 0000000000..cda9e2eb89 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionToolType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeSessionToolType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeSessionToolTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeSessionToolType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeSessionToolTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeSessionToolType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeSessionToolType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeSessionToolTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionToolTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionToolTypeNullable.g.verified.cs new file mode 100644 index 0000000000..2637ab1961 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionToolTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeSessionToolTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeSessionToolTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeSessionToolType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeSessionToolTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeSessionToolType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeSessionToolType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeSessionToolTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionVoice.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionVoice.g.verified.cs new file mode 100644 index 0000000000..106801a37e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionVoice.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RealtimeSessionVoice.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeSessionVoiceJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeSessionVoice Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeSessionVoiceExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeSessionVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeSessionVoice value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RealtimeSessionVoiceExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionVoiceNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionVoiceNullable.g.verified.cs new file mode 100644 index 0000000000..5d78072478 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RealtimeSessionVoiceNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RealtimeSessionVoiceNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RealtimeSessionVoiceNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RealtimeSessionVoice? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RealtimeSessionVoiceExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RealtimeSessionVoice)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RealtimeSessionVoice? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RealtimeSessionVoiceExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ResultItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ResultItem.g.verified.cs new file mode 100644 index 0000000000..6e9be327ba --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ResultItem.g.verified.cs @@ -0,0 +1,170 @@ +//HintName: JsonConverters.ResultItem.g.cs +#nullable enable +#pragma warning disable CS0618 // Type or member is obsolete + +namespace G.JsonConverters +{ + /// + public class ResultItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.ResultItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + options = options ?? throw new global::System.ArgumentNullException(nameof(options)); + var typeInfoResolver = options.TypeInfoResolver ?? throw new global::System.InvalidOperationException("TypeInfoResolver is not set."); + + + var readerCopy = reader; + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageTimeBucketResultItemDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
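+            // Utf8JsonReader is a struct, so "readerCopy" above is an independent snapshot of the current
+            // position: the discriminator is deserialized from the copy, and the original reader is then
+            // used to deserialize the concrete result type selected by the discriminator's Object value.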
+ throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageTimeBucketResultItemDiscriminator)}"); + var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); + + global::G.UsageCompletionsResult? organizationUsageCompletionsResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCompletionsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageCompletionsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageCompletionsResult)}"); + organizationUsageCompletionsResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.UsageEmbeddingsResult? organizationUsageEmbeddingsResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageEmbeddingsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageEmbeddingsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageEmbeddingsResult)}"); + organizationUsageEmbeddingsResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.UsageModerationsResult? organizationUsageModerationsResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageModerationsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageModerationsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageModerationsResult)}"); + organizationUsageModerationsResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.UsageImagesResult? organizationUsageImagesResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageImagesResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageImagesResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageImagesResult)}"); + organizationUsageImagesResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.UsageAudioSpeechesResult? organizationUsageAudioSpeechesResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioSpeechesResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageAudioSpeechesResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageAudioSpeechesResult)}"); + organizationUsageAudioSpeechesResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.UsageAudioTranscriptionsResult? 
organizationUsageAudioTranscriptionsResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageAudioTranscriptionsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageAudioTranscriptionsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageAudioTranscriptionsResult)}"); + organizationUsageAudioTranscriptionsResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.UsageVectorStoresResult? organizationUsageVectorStoresResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageVectorStoresResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageVectorStoresResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageVectorStoresResult)}"); + organizationUsageVectorStoresResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.UsageCodeInterpreterSessionsResult? organizationUsageCodeInterpreterSessionsResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationUsageCodeInterpreterSessionsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageCodeInterpreterSessionsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.UsageCodeInterpreterSessionsResult)}"); + organizationUsageCodeInterpreterSessionsResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + global::G.CostsResult? organizationCostsResult = default; + if (discriminator?.Object == global::G.UsageTimeBucketResultItemDiscriminatorObject.OrganizationCostsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CostsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CostsResult)}"); + organizationCostsResult = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } + + var result = new global::G.ResultItem( + discriminator?.Object, + organizationUsageCompletionsResult, + organizationUsageEmbeddingsResult, + organizationUsageModerationsResult, + organizationUsageImagesResult, + organizationUsageAudioSpeechesResult, + organizationUsageAudioTranscriptionsResult, + organizationUsageVectorStoresResult, + organizationUsageCodeInterpreterSessionsResult, + organizationCostsResult + ); + + return result; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.ResultItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + options = options ?? throw new global::System.ArgumentNullException(nameof(options)); + var typeInfoResolver = options.TypeInfoResolver ?? throw new global::System.InvalidOperationException("TypeInfoResolver is not set."); + + if (value.IsOrganizationUsageCompletionsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageCompletionsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
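+            // Each Is* branch resolves the JsonTypeInfo of the selected concrete result and serializes
+            // that object directly, so no wrapper object is written around the chosen variant.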
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.UsageCompletionsResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationUsageCompletionsResult, typeInfo); + } + else if (value.IsOrganizationUsageEmbeddingsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageEmbeddingsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.UsageEmbeddingsResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationUsageEmbeddingsResult, typeInfo); + } + else if (value.IsOrganizationUsageModerationsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageModerationsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.UsageModerationsResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationUsageModerationsResult, typeInfo); + } + else if (value.IsOrganizationUsageImagesResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageImagesResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.UsageImagesResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationUsageImagesResult, typeInfo); + } + else if (value.IsOrganizationUsageAudioSpeechesResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageAudioSpeechesResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.UsageAudioSpeechesResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationUsageAudioSpeechesResult, typeInfo); + } + else if (value.IsOrganizationUsageAudioTranscriptionsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageAudioTranscriptionsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.UsageAudioTranscriptionsResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationUsageAudioTranscriptionsResult, typeInfo); + } + else if (value.IsOrganizationUsageVectorStoresResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageVectorStoresResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.UsageVectorStoresResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationUsageVectorStoresResult, typeInfo); + } + else if (value.IsOrganizationUsageCodeInterpreterSessionsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.UsageCodeInterpreterSessionsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.UsageCodeInterpreterSessionsResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationUsageCodeInterpreterSessionsResult, typeInfo); + } + else if (value.IsOrganizationCostsResult) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CostsResult), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.CostsResult).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.OrganizationCostsResult, typeInfo); + } + } + } +} \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs new file mode 100644 index 0000000000..313c62ffc3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerNullable.g.verified.cs new file mode 100644 index 0000000000..93e1992bc0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs new file mode 100644 index 0000000000..d3ee34fa38 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemType.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeNullable.g.verified.cs new file mode 100644 index 0000000000..064f6969b4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemType? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolCallsItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolCallsItem.g.verified.cs index ad90f1c088..3f56ffa7b9 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolCallsItem.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolCallsItem.g.verified.cs @@ -18,29 +18,29 @@ public class ToolCallsItemJsonConverter : global::System.Text.Json.Serialization var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsObjectToolCallDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDetailsToolCallsObjectToolCallDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); - global::G.RunStepDetailsToolCallsCodeObject? codeInterpreter = default; - if (discriminator?.Type == global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType.CodeInterpreter) + global::G.RunStepDeltaStepDetailsToolCallsCodeObject? codeInterpreter = default; + if (discriminator?.Type == global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType.CodeInterpreter) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsCodeObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
- throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDetailsToolCallsCodeObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject)}"); codeInterpreter = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.RunStepDetailsToolCallsFileSearchObject? fileSearch = default; - if (discriminator?.Type == global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType.FileSearch) + global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? fileSearch = default; + if (discriminator?.Type == global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType.FileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsFileSearchObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDetailsToolCallsFileSearchObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject)}"); fileSearch = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.RunStepDetailsToolCallsFunctionObject? function = default; - if (discriminator?.Type == global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType.Function) + global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? function = default; + if (discriminator?.Type == global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType.Function) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsFunctionObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDetailsToolCallsFunctionObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject)}"); function = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } @@ -65,20 +65,20 @@ public override void Write( if (value.IsCodeInterpreter) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsCodeObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDetailsToolCallsCodeObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.CodeInterpreter, typeInfo); } else if (value.IsFileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsFileSearchObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDetailsToolCallsFileSearchObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.FileSearch, typeInfo); } else if (value.IsFunction) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsFunctionObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDetailsToolCallsFunctionObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.Function, typeInfo); } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolCallsItem2.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolCallsItem2.g.verified.cs index d8c2924dee..0d8d8b79a6 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolCallsItem2.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolCallsItem2.g.verified.cs @@ -18,29 +18,29 @@ public class ToolCallsItem2JsonConverter : global::System.Text.Json.Serializatio var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsObjectToolCallDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDetailsToolCallsObjectToolCallDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); - global::G.RunStepDeltaStepDetailsToolCallsCodeObject? codeInterpreter = default; - if (discriminator?.Type == global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType.CodeInterpreter) + global::G.RunStepDetailsToolCallsCodeObject? 
codeInterpreter = default; + if (discriminator?.Type == global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType.CodeInterpreter) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsCodeObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDetailsToolCallsCodeObject)}"); codeInterpreter = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? fileSearch = default; - if (discriminator?.Type == global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType.FileSearch) + global::G.RunStepDetailsToolCallsFileSearchObject? fileSearch = default; + if (discriminator?.Type == global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType.FileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsFileSearchObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDetailsToolCallsFileSearchObject)}"); fileSearch = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? function = default; - if (discriminator?.Type == global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType.Function) + global::G.RunStepDetailsToolCallsFunctionObject? function = default; + if (discriminator?.Type == global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType.Function) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsFunctionObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunStepDetailsToolCallsFunctionObject)}"); function = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } @@ -65,20 +65,20 @@ public override void Write( if (value.IsCodeInterpreter) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
- throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDeltaStepDetailsToolCallsCodeObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsCodeObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDetailsToolCallsCodeObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.CodeInterpreter, typeInfo); } else if (value.IsFileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsFileSearchObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDetailsToolCallsFileSearchObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.FileSearch, typeInfo); } else if (value.IsFunction) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDeltaStepDetailsToolCallsFunctionObject).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunStepDetailsToolCallsFunctionObject), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.RunStepDetailsToolCallsFunctionObject).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.Function, typeInfo); } } diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem3.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem3.g.verified.cs index 17bb90eee0..42883e2eb2 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem3.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem3.g.verified.cs @@ -18,37 +18,29 @@ public class ToolsItem3JsonConverter : global::System.Text.Json.Serialization.Js var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.ModifyAssistantRequestToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.ModifyAssistantRequestToolDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateMessageRequestAttachmentToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateMessageRequestAttachmentToolDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); global::G.AssistantToolsCode? codeInterpreter = default; - if (discriminator?.Type == global::G.ModifyAssistantRequestToolDiscriminatorType.CodeInterpreter) + if (discriminator?.Type == global::G.CreateMessageRequestAttachmentToolDiscriminatorType.CodeInterpreter) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsCode), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsCode)}"); codeInterpreter = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.AssistantToolsFileSearch? fileSearch = default; - if (discriminator?.Type == global::G.ModifyAssistantRequestToolDiscriminatorType.FileSearch) + global::G.AssistantToolsFileSearchTypeOnly? fileSearch = default; + if (discriminator?.Type == global::G.CreateMessageRequestAttachmentToolDiscriminatorType.FileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearch)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearchTypeOnly), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearchTypeOnly)}"); fileSearch = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.AssistantToolsFunction? function = default; - if (discriminator?.Type == global::G.ModifyAssistantRequestToolDiscriminatorType.Function) - { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFunction)}"); - function = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); - } var result = new global::G.ToolsItem3( discriminator?.Type, codeInterpreter, - fileSearch, - function + fileSearch ); return result; @@ -71,16 +63,10 @@ public override void Write( } else if (value.IsFileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFileSearch).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearchTypeOnly), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFileSearchTypeOnly).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.FileSearch, typeInfo); } - else if (value.IsFunction) - { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFunction).Name}"); - global::System.Text.Json.JsonSerializer.Serialize(writer, value.Function, typeInfo); - } } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem4.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem4.g.verified.cs index 85415814ec..d200a91900 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem4.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem4.g.verified.cs @@ -18,26 +18,26 @@ public class ToolsItem4JsonConverter : global::System.Text.Json.Serialization.Js var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunObjectToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunObjectToolDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateRunRequestToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateRunRequestToolDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); global::G.AssistantToolsCode? codeInterpreter = default; - if (discriminator?.Type == global::G.RunObjectToolDiscriminatorType.CodeInterpreter) + if (discriminator?.Type == global::G.CreateRunRequestToolDiscriminatorType.CodeInterpreter) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsCode), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsCode)}"); codeInterpreter = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } global::G.AssistantToolsFileSearch? fileSearch = default; - if (discriminator?.Type == global::G.RunObjectToolDiscriminatorType.FileSearch) + if (discriminator?.Type == global::G.CreateRunRequestToolDiscriminatorType.FileSearch) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearch)}"); fileSearch = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } global::G.AssistantToolsFunction? 
function = default; - if (discriminator?.Type == global::G.RunObjectToolDiscriminatorType.Function) + if (discriminator?.Type == global::G.CreateRunRequestToolDiscriminatorType.Function) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFunction)}"); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem5.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem5.g.verified.cs index 3a7cdbc47e..6c2f032ca0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem5.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem5.g.verified.cs @@ -18,26 +18,26 @@ public class ToolsItem5JsonConverter : global::System.Text.Json.Serialization.Js var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateRunRequestToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateRunRequestToolDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateThreadAndRunRequestToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateThreadAndRunRequestToolDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); global::G.AssistantToolsCode? codeInterpreter = default; - if (discriminator?.Type == global::G.CreateRunRequestToolDiscriminatorType.CodeInterpreter) + if (discriminator?.Type == global::G.CreateThreadAndRunRequestToolDiscriminatorType.CodeInterpreter) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsCode), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsCode)}"); codeInterpreter = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } global::G.AssistantToolsFileSearch? fileSearch = default; - if (discriminator?.Type == global::G.CreateRunRequestToolDiscriminatorType.FileSearch) + if (discriminator?.Type == global::G.CreateThreadAndRunRequestToolDiscriminatorType.FileSearch) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearch)}"); fileSearch = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } global::G.AssistantToolsFunction? function = default; - if (discriminator?.Type == global::G.CreateRunRequestToolDiscriminatorType.Function) + if (discriminator?.Type == global::G.CreateThreadAndRunRequestToolDiscriminatorType.Function) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFunction)}"); diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem6.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem6.g.verified.cs index 9506ad363a..6fdeafd719 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem6.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem6.g.verified.cs @@ -18,37 +18,29 @@ public class ToolsItem6JsonConverter : global::System.Text.Json.Serialization.Js var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateThreadAndRunRequestToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateThreadAndRunRequestToolDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageObjectAttachmentToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageObjectAttachmentToolDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); global::G.AssistantToolsCode? codeInterpreter = default; - if (discriminator?.Type == global::G.CreateThreadAndRunRequestToolDiscriminatorType.CodeInterpreter) + if (discriminator?.Type == global::G.MessageObjectAttachmentToolDiscriminatorType.CodeInterpreter) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsCode), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsCode)}"); codeInterpreter = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.AssistantToolsFileSearch? fileSearch = default; - if (discriminator?.Type == global::G.CreateThreadAndRunRequestToolDiscriminatorType.FileSearch) + global::G.AssistantToolsFileSearchTypeOnly? fileSearch = default; + if (discriminator?.Type == global::G.MessageObjectAttachmentToolDiscriminatorType.FileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearch)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearchTypeOnly), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearchTypeOnly)}"); fileSearch = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.AssistantToolsFunction? function = default; - if (discriminator?.Type == global::G.CreateThreadAndRunRequestToolDiscriminatorType.Function) - { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
- throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFunction)}"); - function = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); - } var result = new global::G.ToolsItem6( discriminator?.Type, codeInterpreter, - fileSearch, - function + fileSearch ); return result; @@ -71,16 +63,10 @@ public override void Write( } else if (value.IsFileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFileSearch).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearchTypeOnly), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFileSearchTypeOnly).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.FileSearch, typeInfo); } - else if (value.IsFunction) - { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFunction).Name}"); - global::System.Text.Json.JsonSerializer.Serialize(writer, value.Function, typeInfo); - } } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem7.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem7.g.verified.cs index 3abbeb6d75..ea563aa7ed 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem7.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem7.g.verified.cs @@ -18,29 +18,37 @@ public class ToolsItem7JsonConverter : global::System.Text.Json.Serialization.Js var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.MessageObjectAttachmentToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.MessageObjectAttachmentToolDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.ModifyAssistantRequestToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.ModifyAssistantRequestToolDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); global::G.AssistantToolsCode? codeInterpreter = default; - if (discriminator?.Type == global::G.MessageObjectAttachmentToolDiscriminatorType.CodeInterpreter) + if (discriminator?.Type == global::G.ModifyAssistantRequestToolDiscriminatorType.CodeInterpreter) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsCode), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsCode)}"); codeInterpreter = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.AssistantToolsFileSearchTypeOnly? fileSearch = default; - if (discriminator?.Type == global::G.MessageObjectAttachmentToolDiscriminatorType.FileSearch) + global::G.AssistantToolsFileSearch? fileSearch = default; + if (discriminator?.Type == global::G.ModifyAssistantRequestToolDiscriminatorType.FileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearchTypeOnly), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearchTypeOnly)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearch)}"); fileSearch = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } + global::G.AssistantToolsFunction? function = default; + if (discriminator?.Type == global::G.ModifyAssistantRequestToolDiscriminatorType.Function) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFunction)}"); + function = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } var result = new global::G.ToolsItem7( discriminator?.Type, codeInterpreter, - fileSearch + fileSearch, + function ); return result; @@ -63,10 +71,16 @@ public override void Write( } else if (value.IsFileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearchTypeOnly), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFileSearchTypeOnly).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFileSearch).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.FileSearch, typeInfo); } + else if (value.IsFunction) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? 
+ throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFunction).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.Function, typeInfo); + } } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem8.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem8.g.verified.cs index 3230444183..ccf928552d 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem8.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.ToolsItem8.g.verified.cs @@ -18,29 +18,37 @@ public class ToolsItem8JsonConverter : global::System.Text.Json.Serialization.Js var readerCopy = reader; - var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.CreateMessageRequestAttachmentToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.CreateMessageRequestAttachmentToolDiscriminator)}"); + var discriminatorTypeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.RunObjectToolDiscriminator), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.RunObjectToolDiscriminator)}"); var discriminator = global::System.Text.Json.JsonSerializer.Deserialize(ref readerCopy, discriminatorTypeInfo); global::G.AssistantToolsCode? codeInterpreter = default; - if (discriminator?.Type == global::G.CreateMessageRequestAttachmentToolDiscriminatorType.CodeInterpreter) + if (discriminator?.Type == global::G.RunObjectToolDiscriminatorType.CodeInterpreter) { var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsCode), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsCode)}"); codeInterpreter = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } - global::G.AssistantToolsFileSearchTypeOnly? fileSearch = default; - if (discriminator?.Type == global::G.CreateMessageRequestAttachmentToolDiscriminatorType.FileSearch) + global::G.AssistantToolsFileSearch? fileSearch = default; + if (discriminator?.Type == global::G.RunObjectToolDiscriminatorType.FileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearchTypeOnly), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearchTypeOnly)}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFileSearch)}"); fileSearch = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); } + global::G.AssistantToolsFunction? 
function = default; + if (discriminator?.Type == global::G.RunObjectToolDiscriminatorType.Function) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {nameof(global::G.AssistantToolsFunction)}"); + function = global::System.Text.Json.JsonSerializer.Deserialize(ref reader, typeInfo); + } var result = new global::G.ToolsItem8( discriminator?.Type, codeInterpreter, - fileSearch + fileSearch, + function ); return result; @@ -63,10 +71,16 @@ public override void Write( } else if (value.IsFileSearch) { - var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearchTypeOnly), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? - throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFileSearchTypeOnly).Name}"); + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFileSearch), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFileSearch).Name}"); global::System.Text.Json.JsonSerializer.Serialize(writer, value.FileSearch, typeInfo); } + else if (value.IsFunction) + { + var typeInfo = typeInfoResolver.GetTypeInfo(typeof(global::G.AssistantToolsFunction), options) as global::System.Text.Json.Serialization.Metadata.JsonTypeInfo ?? + throw new global::System.InvalidOperationException($"Cannot get type info for {typeof(global::G.AssistantToolsFunction).Name}"); + global::System.Text.Json.JsonSerializer.Serialize(writer, value.Function, typeInfo); + } } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesBucketWidth.g.verified.cs new file mode 100644 index 0000000000..fe6c736aa6 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageAudioSpeechesBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioSpeechesBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioSpeechesBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioSpeechesBucketWidthExtensions.ToEnum(stringValue) ?? 
default;
+                    }
+
+                    break;
+                }
+                case global::System.Text.Json.JsonTokenType.Number:
+                {
+                    var numValue = reader.GetInt32();
+                    return (global::G.UsageAudioSpeechesBucketWidth)numValue;
+                }
+                default:
+                    throw new global::System.ArgumentOutOfRangeException(nameof(reader));
+            }
+
+            return default;
+        }
+
+        ///
+        public override void Write(
+            global::System.Text.Json.Utf8JsonWriter writer,
+            global::G.UsageAudioSpeechesBucketWidth value,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer));
+
+            writer.WriteStringValue(global::G.UsageAudioSpeechesBucketWidthExtensions.ToValueString(value));
+        }
+    }
+}
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesBucketWidthNullable.g.verified.cs
new file mode 100644
index 0000000000..43bfa29db5
--- /dev/null
+++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesBucketWidthNullable.g.verified.cs
@@ -0,0 +1,57 @@
+//HintName: JsonConverters.UsageAudioSpeechesBucketWidthNullable.g.cs
+#nullable enable
+
+namespace G.JsonConverters
+{
+    ///
+    public sealed class UsageAudioSpeechesBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter<global::G.UsageAudioSpeechesBucketWidth?>
+    {
+        ///
+        public override global::G.UsageAudioSpeechesBucketWidth? Read(
+            ref global::System.Text.Json.Utf8JsonReader reader,
+            global::System.Type typeToConvert,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            switch (reader.TokenType)
+            {
+                case global::System.Text.Json.JsonTokenType.String:
+                {
+                    var stringValue = reader.GetString();
+                    if (stringValue != null)
+                    {
+                        return global::G.UsageAudioSpeechesBucketWidthExtensions.ToEnum(stringValue);
+                    }
+
+                    break;
+                }
+                case global::System.Text.Json.JsonTokenType.Number:
+                {
+                    var numValue = reader.GetInt32();
+                    return (global::G.UsageAudioSpeechesBucketWidth)numValue;
+                }
+                default:
+                    throw new global::System.ArgumentOutOfRangeException(nameof(reader));
+            }
+
+            return default;
+        }
+
+        ///
+        public override void Write(
+            global::System.Text.Json.Utf8JsonWriter writer,
+            global::G.UsageAudioSpeechesBucketWidth? value,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer));
+
+            if (value == null)
+            {
+                writer.WriteNullValue();
+            }
+            else
+            {
+                writer.WriteStringValue(global::G.UsageAudioSpeechesBucketWidthExtensions.ToValueString(value.Value));
+            }
+        }
+    }
+}
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesGroupByItem.g.verified.cs
new file mode 100644
index 0000000000..d3940e56f7
--- /dev/null
+++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesGroupByItem.g.verified.cs
@@ -0,0 +1,50 @@
+//HintName: JsonConverters.UsageAudioSpeechesGroupByItem.g.cs
+#nullable enable
+
+namespace G.JsonConverters
+{
+    ///
+    public sealed class UsageAudioSpeechesGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter<global::G.UsageAudioSpeechesGroupByItem>
+    {
+        ///
+        public override global::G.UsageAudioSpeechesGroupByItem Read(
+            ref global::System.Text.Json.Utf8JsonReader reader,
+            global::System.Type typeToConvert,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            switch (reader.TokenType)
+            {
+                case global::System.Text.Json.JsonTokenType.String:
+                {
+                    var stringValue = reader.GetString();
+                    if (stringValue != null)
+                    {
+                        return global::G.UsageAudioSpeechesGroupByItemExtensions.ToEnum(stringValue) ?? default;
+                    }
+
+                    break;
+                }
+                case global::System.Text.Json.JsonTokenType.Number:
+                {
+                    var numValue = reader.GetInt32();
+                    return (global::G.UsageAudioSpeechesGroupByItem)numValue;
+                }
+                default:
+                    throw new global::System.ArgumentOutOfRangeException(nameof(reader));
+            }
+
+            return default;
+        }
+
+        ///
+        public override void Write(
+            global::System.Text.Json.Utf8JsonWriter writer,
+            global::G.UsageAudioSpeechesGroupByItem value,
+            global::System.Text.Json.JsonSerializerOptions options)
+        {
+            writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer));
+
+            writer.WriteStringValue(global::G.UsageAudioSpeechesGroupByItemExtensions.ToValueString(value));
+        }
+    }
+}
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesGroupByItemNullable.g.verified.cs
new file mode 100644
index 0000000000..22a9307aac
--- /dev/null
+++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesGroupByItemNullable.g.verified.cs
@@ -0,0 +1,57 @@
+//HintName: JsonConverters.UsageAudioSpeechesGroupByItemNullable.g.cs
+#nullable enable
+
+namespace G.JsonConverters
+{
+    ///
+    public sealed class UsageAudioSpeechesGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter<global::G.UsageAudioSpeechesGroupByItem?>
+    {
+        ///
+        public override global::G.UsageAudioSpeechesGroupByItem? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioSpeechesGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioSpeechesGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioSpeechesGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageAudioSpeechesGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesResultObject.g.verified.cs new file mode 100644 index 0000000000..fa3e7d770a --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageAudioSpeechesResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioSpeechesResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioSpeechesResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioSpeechesResultObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioSpeechesResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioSpeechesResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageAudioSpeechesResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..e7164ffcae --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioSpeechesResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageAudioSpeechesResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioSpeechesResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioSpeechesResultObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioSpeechesResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioSpeechesResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioSpeechesResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageAudioSpeechesResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..660f08b2eb --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageAudioTranscriptionsBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioTranscriptionsBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioTranscriptionsBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioTranscriptionsBucketWidthExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioTranscriptionsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioTranscriptionsBucketWidth value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageAudioTranscriptionsBucketWidthExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsBucketWidthNullable.g.verified.cs new file mode 100644 index 0000000000..e1d8adc7bf --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsBucketWidthNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageAudioTranscriptionsBucketWidthNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioTranscriptionsBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioTranscriptionsBucketWidth? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioTranscriptionsBucketWidthExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioTranscriptionsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioTranscriptionsBucketWidth? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageAudioTranscriptionsBucketWidthExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..182bebd1ba --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsGroupByItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageAudioTranscriptionsGroupByItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioTranscriptionsGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioTranscriptionsGroupByItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioTranscriptionsGroupByItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioTranscriptionsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioTranscriptionsGroupByItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageAudioTranscriptionsGroupByItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsGroupByItemNullable.g.verified.cs new file mode 100644 index 0000000000..270b43de62 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsGroupByItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageAudioTranscriptionsGroupByItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioTranscriptionsGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioTranscriptionsGroupByItem? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioTranscriptionsGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioTranscriptionsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioTranscriptionsGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageAudioTranscriptionsGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsResultObject.g.verified.cs new file mode 100644 index 0000000000..56558e73cd --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageAudioTranscriptionsResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioTranscriptionsResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioTranscriptionsResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioTranscriptionsResultObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioTranscriptionsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioTranscriptionsResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageAudioTranscriptionsResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..4ed162c9ee --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageAudioTranscriptionsResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageAudioTranscriptionsResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageAudioTranscriptionsResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageAudioTranscriptionsResultObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageAudioTranscriptionsResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageAudioTranscriptionsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageAudioTranscriptionsResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageAudioTranscriptionsResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..1dc7069040 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageCodeInterpreterSessionsBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCodeInterpreterSessionsBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCodeInterpreterSessionsBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCodeInterpreterSessionsBucketWidthExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCodeInterpreterSessionsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCodeInterpreterSessionsBucketWidth value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageCodeInterpreterSessionsBucketWidthExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsBucketWidthNullable.g.verified.cs new file mode 100644 index 0000000000..b98ba9ea44 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsBucketWidthNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageCodeInterpreterSessionsBucketWidthNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCodeInterpreterSessionsBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCodeInterpreterSessionsBucketWidth? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCodeInterpreterSessionsBucketWidthExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCodeInterpreterSessionsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCodeInterpreterSessionsBucketWidth? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageCodeInterpreterSessionsBucketWidthExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..19ebfb14c0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsGroupByItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageCodeInterpreterSessionsGroupByItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCodeInterpreterSessionsGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCodeInterpreterSessionsGroupByItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCodeInterpreterSessionsGroupByItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCodeInterpreterSessionsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCodeInterpreterSessionsGroupByItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageCodeInterpreterSessionsGroupByItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsGroupByItemNullable.g.verified.cs new file mode 100644 index 0000000000..54b7a7653c --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsGroupByItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageCodeInterpreterSessionsGroupByItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCodeInterpreterSessionsGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCodeInterpreterSessionsGroupByItem? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCodeInterpreterSessionsGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCodeInterpreterSessionsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCodeInterpreterSessionsGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageCodeInterpreterSessionsGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsResultObject.g.verified.cs new file mode 100644 index 0000000000..7454574b31 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageCodeInterpreterSessionsResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCodeInterpreterSessionsResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCodeInterpreterSessionsResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCodeInterpreterSessionsResultObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCodeInterpreterSessionsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCodeInterpreterSessionsResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageCodeInterpreterSessionsResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..e16bad34ff --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCodeInterpreterSessionsResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageCodeInterpreterSessionsResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCodeInterpreterSessionsResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCodeInterpreterSessionsResultObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCodeInterpreterSessionsResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCodeInterpreterSessionsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCodeInterpreterSessionsResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageCodeInterpreterSessionsResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..77e95a3494 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageCompletionsBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCompletionsBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCompletionsBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCompletionsBucketWidthExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCompletionsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCompletionsBucketWidth value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageCompletionsBucketWidthExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsBucketWidthNullable.g.verified.cs new file mode 100644 index 0000000000..537c84dda0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsBucketWidthNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageCompletionsBucketWidthNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCompletionsBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCompletionsBucketWidth? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCompletionsBucketWidthExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCompletionsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCompletionsBucketWidth? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageCompletionsBucketWidthExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..ba9f4790ae --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsGroupByItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageCompletionsGroupByItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCompletionsGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCompletionsGroupByItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCompletionsGroupByItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCompletionsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCompletionsGroupByItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageCompletionsGroupByItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsGroupByItemNullable.g.verified.cs new file mode 100644 index 0000000000..7ab51399ad --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsGroupByItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageCompletionsGroupByItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCompletionsGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCompletionsGroupByItem? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCompletionsGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCompletionsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCompletionsGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageCompletionsGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsResultObject.g.verified.cs new file mode 100644 index 0000000000..14115edd3b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageCompletionsResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCompletionsResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCompletionsResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCompletionsResultObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCompletionsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCompletionsResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageCompletionsResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..9b0eadc1e9 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCompletionsResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageCompletionsResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCompletionsResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCompletionsResultObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCompletionsResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCompletionsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCompletionsResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageCompletionsResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..fce7ccfbae --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageCostsBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCostsBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCostsBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCostsBucketWidthExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCostsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCostsBucketWidth value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageCostsBucketWidthExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsBucketWidthNullable.g.verified.cs new file mode 100644 index 0000000000..85538b56a4 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsBucketWidthNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageCostsBucketWidthNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCostsBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCostsBucketWidth? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCostsBucketWidthExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCostsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCostsBucketWidth? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageCostsBucketWidthExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..e0df46a3e8 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsGroupByItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageCostsGroupByItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCostsGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCostsGroupByItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCostsGroupByItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCostsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCostsGroupByItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageCostsGroupByItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsGroupByItemNullable.g.verified.cs new file mode 100644 index 0000000000..35263e1192 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageCostsGroupByItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageCostsGroupByItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageCostsGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageCostsGroupByItem? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageCostsGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageCostsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageCostsGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageCostsGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..7b292c2f11 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageEmbeddingsBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageEmbeddingsBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageEmbeddingsBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageEmbeddingsBucketWidthExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageEmbeddingsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageEmbeddingsBucketWidth value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageEmbeddingsBucketWidthExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsBucketWidthNullable.g.verified.cs new file mode 100644 index 0000000000..14264d03fc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsBucketWidthNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageEmbeddingsBucketWidthNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageEmbeddingsBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageEmbeddingsBucketWidth? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageEmbeddingsBucketWidthExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageEmbeddingsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageEmbeddingsBucketWidth? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageEmbeddingsBucketWidthExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..e8a703c088 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsGroupByItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageEmbeddingsGroupByItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageEmbeddingsGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageEmbeddingsGroupByItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageEmbeddingsGroupByItemExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageEmbeddingsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageEmbeddingsGroupByItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageEmbeddingsGroupByItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsGroupByItemNullable.g.verified.cs new file mode 100644 index 0000000000..db33bb4582 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsGroupByItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageEmbeddingsGroupByItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageEmbeddingsGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageEmbeddingsGroupByItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageEmbeddingsGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageEmbeddingsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageEmbeddingsGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageEmbeddingsGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsResultObject.g.verified.cs new file mode 100644 index 0000000000..704126779e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageEmbeddingsResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageEmbeddingsResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageEmbeddingsResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageEmbeddingsResultObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageEmbeddingsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageEmbeddingsResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageEmbeddingsResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..e5cb598355 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageEmbeddingsResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageEmbeddingsResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageEmbeddingsResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageEmbeddingsResultObject? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageEmbeddingsResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageEmbeddingsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageEmbeddingsResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageEmbeddingsResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesBucketWidth.g.verified.cs new file mode 100644 index 0000000000..8562cceeb5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageImagesBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesBucketWidthExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesBucketWidth value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageImagesBucketWidthExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesBucketWidthNullable.g.verified.cs new file mode 100644 index 0000000000..04ece257fa --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesBucketWidthNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageImagesBucketWidthNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesBucketWidth? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesBucketWidthExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesBucketWidth? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageImagesBucketWidthExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesGroupByItem.g.verified.cs new file mode 100644 index 0000000000..339d01d633 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesGroupByItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageImagesGroupByItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesGroupByItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesGroupByItemExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesGroupByItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageImagesGroupByItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesGroupByItemNullable.g.verified.cs new file mode 100644 index 0000000000..a407480757 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesGroupByItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageImagesGroupByItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesGroupByItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageImagesGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesResultObject.g.verified.cs new file mode 100644 index 0000000000..dfdff11280 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageImagesResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesResultObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageImagesResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..012a6a2062 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageImagesResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesResultObject? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageImagesResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSize.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSize.g.verified.cs new file mode 100644 index 0000000000..aa3671ce0b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSize.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageImagesSize.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesSizeJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesSize Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesSizeExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesSize)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesSize value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageImagesSizeExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSizeNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSizeNullable.g.verified.cs new file mode 100644 index 0000000000..b27c25479b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSizeNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageImagesSizeNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesSizeNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesSize? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesSizeExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesSize)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesSize? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageImagesSizeExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSource.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSource.g.verified.cs new file mode 100644 index 0000000000..05abe371e5 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSource.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageImagesSource.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesSourceJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesSource Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesSourceExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesSource)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesSource value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageImagesSourceExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSourceNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSourceNullable.g.verified.cs new file mode 100644 index 0000000000..9b5b1d334e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageImagesSourceNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageImagesSourceNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageImagesSourceNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageImagesSource? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageImagesSourceExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageImagesSource)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageImagesSource? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageImagesSourceExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsBucketWidth.g.verified.cs new file mode 100644 index 0000000000..07a4e6189e --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageModerationsBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageModerationsBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageModerationsBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageModerationsBucketWidthExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageModerationsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageModerationsBucketWidth value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageModerationsBucketWidthExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsBucketWidthNullable.g.verified.cs new file mode 100644 index 0000000000..6cfd55f9bc --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsBucketWidthNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageModerationsBucketWidthNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageModerationsBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageModerationsBucketWidth? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageModerationsBucketWidthExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageModerationsBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageModerationsBucketWidth? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageModerationsBucketWidthExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsGroupByItem.g.verified.cs new file mode 100644 index 0000000000..9637f8c3a7 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsGroupByItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageModerationsGroupByItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageModerationsGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageModerationsGroupByItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageModerationsGroupByItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageModerationsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageModerationsGroupByItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageModerationsGroupByItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsGroupByItemNullable.g.verified.cs new file mode 100644 index 0000000000..656015f007 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsGroupByItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageModerationsGroupByItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageModerationsGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageModerationsGroupByItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageModerationsGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageModerationsGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageModerationsGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageModerationsGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsResultObject.g.verified.cs new file mode 100644 index 0000000000..596ebb2926 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageModerationsResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageModerationsResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageModerationsResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageModerationsResultObjectExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageModerationsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageModerationsResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageModerationsResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..b62056dc8d --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageModerationsResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageModerationsResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageModerationsResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageModerationsResultObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageModerationsResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageModerationsResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageModerationsResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageModerationsResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageResponseObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageResponseObject.g.verified.cs new file mode 100644 index 0000000000..cbac2fbff3 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageResponseObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageResponseObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageResponseObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageResponseObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageResponseObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageResponseObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageResponseObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageResponseObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageResponseObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageResponseObjectNullable.g.verified.cs new file mode 100644 index 0000000000..d1a3a7c872 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageResponseObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageResponseObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageResponseObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageResponseObject? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageResponseObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageResponseObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageResponseObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageResponseObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketObject.g.verified.cs new file mode 100644 index 0000000000..32cd505b51 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageTimeBucketObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageTimeBucketObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageTimeBucketObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageTimeBucketObjectExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageTimeBucketObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageTimeBucketObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageTimeBucketObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketObjectNullable.g.verified.cs new file mode 100644 index 0000000000..b852e9b714 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageTimeBucketObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageTimeBucketObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageTimeBucketObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageTimeBucketObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageTimeBucketObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageTimeBucketObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageTimeBucketObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs new file mode 100644 index 0000000000..3ef31f9330 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketResultItemDiscriminatorObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageTimeBucketResultItemDiscriminatorObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageTimeBucketResultItemDiscriminatorObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageTimeBucketResultItemDiscriminatorObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageTimeBucketResultItemDiscriminatorObjectExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageTimeBucketResultItemDiscriminatorObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageTimeBucketResultItemDiscriminatorObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageTimeBucketResultItemDiscriminatorObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketResultItemDiscriminatorObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketResultItemDiscriminatorObjectNullable.g.verified.cs new file mode 100644 index 0000000000..e9317c141b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageTimeBucketResultItemDiscriminatorObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageTimeBucketResultItemDiscriminatorObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageTimeBucketResultItemDiscriminatorObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageTimeBucketResultItemDiscriminatorObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageTimeBucketResultItemDiscriminatorObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageTimeBucketResultItemDiscriminatorObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageTimeBucketResultItemDiscriminatorObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageTimeBucketResultItemDiscriminatorObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresBucketWidth.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresBucketWidth.g.verified.cs new file mode 100644 index 0000000000..30ba5dddf2 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresBucketWidth.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageVectorStoresBucketWidth.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageVectorStoresBucketWidthJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageVectorStoresBucketWidth Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageVectorStoresBucketWidthExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageVectorStoresBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageVectorStoresBucketWidth value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageVectorStoresBucketWidthExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresBucketWidthNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresBucketWidthNullable.g.verified.cs new file mode 100644 index 0000000000..02aefb33e0 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresBucketWidthNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageVectorStoresBucketWidthNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageVectorStoresBucketWidthNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageVectorStoresBucketWidth? 
Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageVectorStoresBucketWidthExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageVectorStoresBucketWidth)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageVectorStoresBucketWidth? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageVectorStoresBucketWidthExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresGroupByItem.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresGroupByItem.g.verified.cs new file mode 100644 index 0000000000..531745e7ec --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresGroupByItem.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageVectorStoresGroupByItem.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageVectorStoresGroupByItemJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageVectorStoresGroupByItem Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageVectorStoresGroupByItemExtensions.ToEnum(stringValue) ?? default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageVectorStoresGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageVectorStoresGroupByItem value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageVectorStoresGroupByItemExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresGroupByItemNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresGroupByItemNullable.g.verified.cs new file mode 100644 index 0000000000..6dd617f384 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresGroupByItemNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageVectorStoresGroupByItemNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageVectorStoresGroupByItemNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageVectorStoresGroupByItem? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageVectorStoresGroupByItemExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageVectorStoresGroupByItem)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageVectorStoresGroupByItem? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageVectorStoresGroupByItemExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresResultObject.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresResultObject.g.verified.cs new file mode 100644 index 0000000000..f645dac2ce --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresResultObject.g.verified.cs @@ -0,0 +1,50 @@ +//HintName: JsonConverters.UsageVectorStoresResultObject.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageVectorStoresResultObjectJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageVectorStoresResultObject Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageVectorStoresResultObjectExtensions.ToEnum(stringValue) ?? 
default; + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageVectorStoresResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageVectorStoresResultObject value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? throw new global::System.ArgumentNullException(nameof(writer)); + + writer.WriteStringValue(global::G.UsageVectorStoresResultObjectExtensions.ToValueString(value)); + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresResultObjectNullable.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresResultObjectNullable.g.verified.cs new file mode 100644 index 0000000000..cffd30455b --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonConverters.UsageVectorStoresResultObjectNullable.g.verified.cs @@ -0,0 +1,57 @@ +//HintName: JsonConverters.UsageVectorStoresResultObjectNullable.g.cs +#nullable enable + +namespace G.JsonConverters +{ + /// + public sealed class UsageVectorStoresResultObjectNullableJsonConverter : global::System.Text.Json.Serialization.JsonConverter + { + /// + public override global::G.UsageVectorStoresResultObject? Read( + ref global::System.Text.Json.Utf8JsonReader reader, + global::System.Type typeToConvert, + global::System.Text.Json.JsonSerializerOptions options) + { + switch (reader.TokenType) + { + case global::System.Text.Json.JsonTokenType.String: + { + var stringValue = reader.GetString(); + if (stringValue != null) + { + return global::G.UsageVectorStoresResultObjectExtensions.ToEnum(stringValue); + } + + break; + } + case global::System.Text.Json.JsonTokenType.Number: + { + var numValue = reader.GetInt32(); + return (global::G.UsageVectorStoresResultObject)numValue; + } + default: + throw new global::System.ArgumentOutOfRangeException(nameof(reader)); + } + + return default; + } + + /// + public override void Write( + global::System.Text.Json.Utf8JsonWriter writer, + global::G.UsageVectorStoresResultObject? value, + global::System.Text.Json.JsonSerializerOptions options) + { + writer = writer ?? 
throw new global::System.ArgumentNullException(nameof(writer)); + + if (value == null) + { + writer.WriteNullValue(); + } + else + { + writer.WriteStringValue(global::G.UsageVectorStoresResultObjectExtensions.ToValueString(value.Value)); + } + } + } +} diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonSerializerContextConverters.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonSerializerContextConverters.g.verified.cs index 2bb2fe85d9..d3ff6e53c0 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonSerializerContextConverters.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonSerializerContextConverters.g.verified.cs @@ -11,194 +11,280 @@ internal sealed partial class JsonSerializerContextConverters { private readonly global::System.Type[] _types = new global::System.Type[] { - typeof(global::G.JsonConverters.ListModelsResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ListModelsResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.ModelObjectJsonConverter), - typeof(global::G.JsonConverters.ModelObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateCompletionRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateCompletionRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateCompletionResponseChoiceFinishReasonJsonConverter), - typeof(global::G.JsonConverters.CreateCompletionResponseChoiceFinishReasonNullableJsonConverter), - typeof(global::G.JsonConverters.CreateCompletionResponseObjectJsonConverter), - typeof(global::G.JsonConverters.CreateCompletionResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantObjectObjectJsonConverter), + typeof(global::G.JsonConverters.AssistantObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantToolsCodeTypeJsonConverter), + typeof(global::G.JsonConverters.AssistantToolsCodeTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantToolsFileSearchTypeJsonConverter), + typeof(global::G.JsonConverters.AssistantToolsFileSearchTypeNullableJsonConverter), + typeof(global::G.JsonConverters.FileSearchRankingOptionsRankerJsonConverter), + typeof(global::G.JsonConverters.FileSearchRankingOptionsRankerNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantToolsFunctionTypeJsonConverter), + typeof(global::G.JsonConverters.AssistantToolsFunctionTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantObjectToolDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.AssistantObjectToolDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantsApiResponseFormatOptionEnumJsonConverter), + typeof(global::G.JsonConverters.AssistantsApiResponseFormatOptionEnumNullableJsonConverter), + typeof(global::G.JsonConverters.ResponseFormatTextTypeJsonConverter), + typeof(global::G.JsonConverters.ResponseFormatTextTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ResponseFormatJsonObjectTypeJsonConverter), + typeof(global::G.JsonConverters.ResponseFormatJsonObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ResponseFormatJsonSchemaTypeJsonConverter), + typeof(global::G.JsonConverters.ResponseFormatJsonSchemaTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ErrorEventEventJsonConverter), + typeof(global::G.JsonConverters.ErrorEventEventNullableJsonConverter), + 
typeof(global::G.JsonConverters.DoneEventEventJsonConverter), + typeof(global::G.JsonConverters.DoneEventEventNullableJsonConverter), + typeof(global::G.JsonConverters.DoneEventDataJsonConverter), + typeof(global::G.JsonConverters.DoneEventDataNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant3EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant3EventNullableJsonConverter), + typeof(global::G.JsonConverters.ThreadObjectObjectJsonConverter), + typeof(global::G.JsonConverters.ThreadObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant4EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant4EventNullableJsonConverter), + typeof(global::G.JsonConverters.RunObjectObjectJsonConverter), + typeof(global::G.JsonConverters.RunObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.RunObjectStatusJsonConverter), + typeof(global::G.JsonConverters.RunObjectStatusNullableJsonConverter), + typeof(global::G.JsonConverters.RunObjectRequiredActionTypeJsonConverter), + typeof(global::G.JsonConverters.RunObjectRequiredActionTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunToolCallObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunToolCallObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunObjectLastErrorCodeJsonConverter), + typeof(global::G.JsonConverters.RunObjectLastErrorCodeNullableJsonConverter), + typeof(global::G.JsonConverters.RunObjectIncompleteDetailsReasonJsonConverter), + typeof(global::G.JsonConverters.RunObjectIncompleteDetailsReasonNullableJsonConverter), + typeof(global::G.JsonConverters.RunObjectToolDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.RunObjectToolDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.TruncationObjectTypeJsonConverter), + typeof(global::G.JsonConverters.TruncationObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantsApiToolChoiceOptionEnumJsonConverter), + typeof(global::G.JsonConverters.AssistantsApiToolChoiceOptionEnumNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantsNamedToolChoiceTypeJsonConverter), + typeof(global::G.JsonConverters.AssistantsNamedToolChoiceTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant5EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant5EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant6EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant6EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant7EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant7EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant8EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant8EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant9EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant9EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant10EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant10EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant11EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant11EventNullableJsonConverter), + 
typeof(global::G.JsonConverters.AssistantStreamEventVariant12EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant12EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant13EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant13EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant14EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant14EventNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectObjectJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectStatusJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectStatusNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsMessageCreationObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsMessageCreationObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeOutputLogsObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeOutputLogsObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeOutputImageObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeOutputImageObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchRankingOptionsObjectRankerNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchResultObjectContentItemTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsFunctionObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsFunctionObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsObjectToolCallDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsObjectToolCallDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectStepDetailsDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectStepDetailsDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectLastErrorCodeJsonConverter), + 
typeof(global::G.JsonConverters.RunStepObjectLastErrorCodeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant15EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant15EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant16EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant16EventNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaObjectObjectJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsMessageCreationObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsMessageCreationObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsFileSearchObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsFileSearchObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsFunctionObjectTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsFunctionObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaObjectDeltaStepDetailsDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaObjectDeltaStepDetailsDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant17EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant17EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant18EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant18EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant19EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant19EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant20EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant20EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant21EventJsonConverter), + 
typeof(global::G.JsonConverters.AssistantStreamEventVariant21EventNullableJsonConverter), + typeof(global::G.JsonConverters.MessageObjectObjectJsonConverter), + typeof(global::G.JsonConverters.MessageObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.MessageObjectStatusJsonConverter), + typeof(global::G.JsonConverters.MessageObjectStatusNullableJsonConverter), + typeof(global::G.JsonConverters.MessageObjectIncompleteDetailsReasonJsonConverter), + typeof(global::G.JsonConverters.MessageObjectIncompleteDetailsReasonNullableJsonConverter), + typeof(global::G.JsonConverters.MessageObjectRoleJsonConverter), + typeof(global::G.JsonConverters.MessageObjectRoleNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentImageFileObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageContentImageFileObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentImageFileObjectImageFileDetailJsonConverter), + typeof(global::G.JsonConverters.MessageContentImageFileObjectImageFileDetailNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentImageUrlObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageContentImageUrlObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentImageUrlObjectImageUrlDetailJsonConverter), + typeof(global::G.JsonConverters.MessageContentImageUrlObjectImageUrlDetailNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentTextObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageContentTextObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentTextAnnotationsFileCitationObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageContentTextAnnotationsFileCitationObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentTextAnnotationsFilePathObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageContentTextAnnotationsFilePathObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentTextObjectTextAnnotationDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.MessageContentTextObjectTextAnnotationDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageContentRefusalObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageContentRefusalObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageObjectContentItemDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.MessageObjectContentItemDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantToolsFileSearchTypeOnlyTypeJsonConverter), + typeof(global::G.JsonConverters.AssistantToolsFileSearchTypeOnlyTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageObjectAttachmentToolDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.MessageObjectAttachmentToolDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant22EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant22EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant23EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant23EventNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaObjectObjectJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaObjectDeltaRoleJsonConverter), + 
typeof(global::G.JsonConverters.MessageDeltaObjectDeltaRoleNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentImageFileObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentImageFileObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentImageFileObjectImageFileDetailJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentImageFileObjectImageFileDetailNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentTextObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentTextObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentTextAnnotationsFileCitationObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentTextAnnotationsFileCitationObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentTextAnnotationsFilePathObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentTextAnnotationsFilePathObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentTextObjectTextAnnotationDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentTextObjectTextAnnotationDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentRefusalObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentRefusalObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentImageUrlObjectTypeJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentImageUrlObjectTypeNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentImageUrlObjectImageUrlDetailJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaContentImageUrlObjectImageUrlDetailNullableJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaObjectDeltaContentItemDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.MessageDeltaObjectDeltaContentItemDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant24EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant24EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant25EventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventVariant25EventNullableJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventDiscriminatorEventJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventDiscriminatorEventNullableJsonConverter), + typeof(global::G.JsonConverters.AudioResponseFormatJsonConverter), + typeof(global::G.JsonConverters.AudioResponseFormatNullableJsonConverter), + typeof(global::G.JsonConverters.AuditLogEventTypeJsonConverter), + typeof(global::G.JsonConverters.AuditLogEventTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AuditLogActorTypeJsonConverter), + typeof(global::G.JsonConverters.AuditLogActorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AuditLogActorApiKeyTypeJsonConverter), + typeof(global::G.JsonConverters.AuditLogActorApiKeyTypeNullableJsonConverter), + typeof(global::G.JsonConverters.AutoChunkingStrategyRequestParamTypeJsonConverter), + typeof(global::G.JsonConverters.AutoChunkingStrategyRequestParamTypeNullableJsonConverter), + typeof(global::G.JsonConverters.BatchObjectJsonConverter), + typeof(global::G.JsonConverters.BatchObjectNullableJsonConverter), + 
typeof(global::G.JsonConverters.BatchStatusJsonConverter), + typeof(global::G.JsonConverters.BatchStatusNullableJsonConverter), + typeof(global::G.JsonConverters.BatchRequestInputMethodJsonConverter), + typeof(global::G.JsonConverters.BatchRequestInputMethodNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionMessageToolCallTypeJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionMessageToolCallTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionMessageToolCallChunkTypeJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionMessageToolCallChunkTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionModalitieJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionModalitieNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionNamedToolChoiceTypeJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionNamedToolChoiceTypeNullableJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartTextTypeJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartTextTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartImageTypeJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartImageTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartImageImageUrlDetailJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartImageImageUrlDetailNullableJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartRefusalTypeJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartRefusalTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageContentPartDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageContentPartDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageRoleJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageRoleNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestFunctionMessageRoleJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestFunctionMessageRoleNullableJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestSystemMessageRoleJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestSystemMessageRoleNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartImageTypeJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartImageTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartImageImageUrlDetailJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartImageImageUrlDetailNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartAudioTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestMessageContentPartAudioInputAudioFormatNullableJsonConverter), 
typeof(global::G.JsonConverters.ChatCompletionRequestUserMessageContentPartDiscriminatorTypeJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestUserMessageContentPartDiscriminatorTypeNullableJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestUserMessageRoleJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestUserMessageRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageContentPartDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageContentPartDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageRoleJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionMessageToolCallTypeJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionMessageToolCallTypeNullableJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestToolMessageRoleJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestToolMessageRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestFunctionMessageRoleJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestFunctionMessageRoleNullableJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestMessageDiscriminatorRoleJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestMessageDiscriminatorRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionToolTypeJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionToolTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ResponseFormatTextTypeJsonConverter), - typeof(global::G.JsonConverters.ResponseFormatTextTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ResponseFormatJsonObjectTypeJsonConverter), - typeof(global::G.JsonConverters.ResponseFormatJsonObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ResponseFormatJsonSchemaTypeJsonConverter), - typeof(global::G.JsonConverters.ResponseFormatJsonSchemaTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionToolChoiceOptionEnumJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionToolChoiceOptionEnumNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionNamedToolChoiceTypeJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionNamedToolChoiceTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionMessageToolCallChunkTypeJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionMessageToolCallChunkTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRoleJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRoleNullableJsonConverter), typeof(global::G.JsonConverters.ChatCompletionResponseMessageRoleJsonConverter), typeof(global::G.JsonConverters.ChatCompletionResponseMessageRoleNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRoleJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRoleNullableJsonConverter), typeof(global::G.JsonConverters.ChatCompletionStreamResponseDeltaRoleJsonConverter), typeof(global::G.JsonConverters.ChatCompletionStreamResponseDeltaRoleNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionRequestModelNullableJsonConverter), - 
typeof(global::G.JsonConverters.CreateChatCompletionRequestResponseFormatDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionRequestResponseFormatDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionRequestServiceTierJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionRequestServiceTierNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionRequestFunctionCallJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionRequestFunctionCallNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionResponseChoiceFinishReasonJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionResponseChoiceFinishReasonNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionResponseServiceTierJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionResponseServiceTierNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionResponseObjectJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionFunctionResponseChoiceFinishReasonJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionFunctionResponseChoiceFinishReasonNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionFunctionResponseObjectJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionFunctionResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobHyperparametersNEpochsJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobHyperparametersNEpochsNullableJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobObjectJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobObjectNullableJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobStatusJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobStatusNullableJsonConverter), - typeof(global::G.JsonConverters.FineTuningIntegrationTypeJsonConverter), - typeof(global::G.JsonConverters.FineTuningIntegrationTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ListPaginatedFineTuningJobsResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ListPaginatedFineTuningJobsResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseChoiceFinishReasonJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseChoiceFinishReasonNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseServiceTierJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseServiceTierNullableJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseObjectJsonConverter), - typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestQualityJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestQualityNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestResponseFormatJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestResponseFormatNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestSizeJsonConverter), - 
typeof(global::G.JsonConverters.CreateImageRequestSizeNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestStyleJsonConverter), - typeof(global::G.JsonConverters.CreateImageRequestStyleNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageEditRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateImageEditRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageEditRequestSizeJsonConverter), - typeof(global::G.JsonConverters.CreateImageEditRequestSizeNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageEditRequestResponseFormatJsonConverter), - typeof(global::G.JsonConverters.CreateImageEditRequestResponseFormatNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageVariationRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateImageVariationRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageVariationRequestResponseFormatJsonConverter), - typeof(global::G.JsonConverters.CreateImageVariationRequestResponseFormatNullableJsonConverter), - typeof(global::G.JsonConverters.CreateImageVariationRequestSizeJsonConverter), - typeof(global::G.JsonConverters.CreateImageVariationRequestSizeNullableJsonConverter), - typeof(global::G.JsonConverters.CreateModerationRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateModerationRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.OpenAIFileObjectJsonConverter), - typeof(global::G.JsonConverters.OpenAIFileObjectNullableJsonConverter), - typeof(global::G.JsonConverters.OpenAIFilePurposeJsonConverter), - typeof(global::G.JsonConverters.OpenAIFilePurposeNullableJsonConverter), - typeof(global::G.JsonConverters.OpenAIFileStatusJsonConverter), - typeof(global::G.JsonConverters.OpenAIFileStatusNullableJsonConverter), - typeof(global::G.JsonConverters.ListFilesResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ListFilesResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateFileRequestPurposeJsonConverter), - typeof(global::G.JsonConverters.CreateFileRequestPurposeNullableJsonConverter), - typeof(global::G.JsonConverters.DeleteFileResponseObjectJsonConverter), - typeof(global::G.JsonConverters.DeleteFileResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateUploadRequestPurposeJsonConverter), - typeof(global::G.JsonConverters.CreateUploadRequestPurposeNullableJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersBatchSizeJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersBatchSizeNullableJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersLearningRateMultiplierJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersLearningRateMultiplierNullableJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsNullableJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestIntegrationTypeJsonConverter), - typeof(global::G.JsonConverters.CreateFineTuningJobRequestIntegrationTypeNullableJsonConverter), - 
typeof(global::G.JsonConverters.FineTuningJobEventLevelJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobEventLevelNullableJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobEventObjectJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobEventObjectNullableJsonConverter), - typeof(global::G.JsonConverters.ListFineTuningJobEventsResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ListFineTuningJobEventsResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobCheckpointObjectJsonConverter), - typeof(global::G.JsonConverters.FineTuningJobCheckpointObjectNullableJsonConverter), - typeof(global::G.JsonConverters.ListFineTuningJobCheckpointsResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ListFineTuningJobCheckpointsResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateEmbeddingRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateEmbeddingRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateEmbeddingRequestEncodingFormatJsonConverter), - typeof(global::G.JsonConverters.CreateEmbeddingRequestEncodingFormatNullableJsonConverter), - typeof(global::G.JsonConverters.EmbeddingObjectJsonConverter), - typeof(global::G.JsonConverters.EmbeddingObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateEmbeddingResponseObjectJsonConverter), - typeof(global::G.JsonConverters.CreateEmbeddingResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.CreateTranscriptionRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateTranscriptionRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateTranscriptionRequestResponseFormatJsonConverter), - typeof(global::G.JsonConverters.CreateTranscriptionRequestResponseFormatNullableJsonConverter), - typeof(global::G.JsonConverters.CreateTranscriptionRequestTimestampGranularitieJsonConverter), - typeof(global::G.JsonConverters.CreateTranscriptionRequestTimestampGranularitieNullableJsonConverter), - typeof(global::G.JsonConverters.CreateTranslationRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateTranslationRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateSpeechRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateSpeechRequestModelNullableJsonConverter), - typeof(global::G.JsonConverters.CreateSpeechRequestVoiceJsonConverter), - typeof(global::G.JsonConverters.CreateSpeechRequestVoiceNullableJsonConverter), - typeof(global::G.JsonConverters.CreateSpeechRequestResponseFormatJsonConverter), - typeof(global::G.JsonConverters.CreateSpeechRequestResponseFormatNullableJsonConverter), - typeof(global::G.JsonConverters.UploadStatusJsonConverter), - typeof(global::G.JsonConverters.UploadStatusNullableJsonConverter), - typeof(global::G.JsonConverters.UploadObjectJsonConverter), - typeof(global::G.JsonConverters.UploadObjectNullableJsonConverter), - typeof(global::G.JsonConverters.UploadPartObjectJsonConverter), - typeof(global::G.JsonConverters.UploadPartObjectNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantsApiResponseFormatOptionEnumJsonConverter), - typeof(global::G.JsonConverters.AssistantsApiResponseFormatOptionEnumNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantObjectObjectJsonConverter), - typeof(global::G.JsonConverters.AssistantObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantToolsCodeTypeJsonConverter), - 
typeof(global::G.JsonConverters.AssistantToolsCodeTypeNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantToolsFileSearchTypeJsonConverter), - typeof(global::G.JsonConverters.AssistantToolsFileSearchTypeNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantToolsFunctionTypeJsonConverter), - typeof(global::G.JsonConverters.AssistantToolsFunctionTypeNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantObjectToolDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.AssistantObjectToolDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionToolTypeJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionToolTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionToolChoiceOptionEnumJsonConverter), + typeof(global::G.JsonConverters.ChatCompletionToolChoiceOptionEnumNullableJsonConverter), + typeof(global::G.JsonConverters.StaticChunkingStrategyRequestParamTypeJsonConverter), + typeof(global::G.JsonConverters.StaticChunkingStrategyRequestParamTypeNullableJsonConverter), + typeof(global::G.JsonConverters.ChunkingStrategyRequestParamDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.ChunkingStrategyRequestParamDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.CostsResultObjectJsonConverter), + typeof(global::G.JsonConverters.CostsResultObjectNullableJsonConverter), typeof(global::G.JsonConverters.CreateAssistantRequestModelJsonConverter), typeof(global::G.JsonConverters.CreateAssistantRequestModelNullableJsonConverter), typeof(global::G.JsonConverters.CreateAssistantRequestToolDiscriminatorTypeJsonConverter), @@ -209,52 +295,136 @@ internal sealed partial class JsonSerializerContextConverters typeof(global::G.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeNullableJsonConverter), typeof(global::G.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorTypeJsonConverter), typeof(global::G.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ModifyAssistantRequestToolDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.ModifyAssistantRequestToolDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.DeleteAssistantResponseObjectJsonConverter), - typeof(global::G.JsonConverters.DeleteAssistantResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantToolsFileSearchTypeOnlyTypeJsonConverter), - typeof(global::G.JsonConverters.AssistantToolsFileSearchTypeOnlyTypeNullableJsonConverter), - typeof(global::G.JsonConverters.TruncationObjectTypeJsonConverter), - typeof(global::G.JsonConverters.TruncationObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantsApiToolChoiceOptionEnumJsonConverter), - typeof(global::G.JsonConverters.AssistantsApiToolChoiceOptionEnumNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantsNamedToolChoiceTypeJsonConverter), - typeof(global::G.JsonConverters.AssistantsNamedToolChoiceTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunObjectObjectJsonConverter), - typeof(global::G.JsonConverters.RunObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.RunObjectStatusJsonConverter), - typeof(global::G.JsonConverters.RunObjectStatusNullableJsonConverter), - 
typeof(global::G.JsonConverters.RunObjectRequiredActionTypeJsonConverter), - typeof(global::G.JsonConverters.RunObjectRequiredActionTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunToolCallObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunToolCallObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunObjectLastErrorCodeJsonConverter), - typeof(global::G.JsonConverters.RunObjectLastErrorCodeNullableJsonConverter), - typeof(global::G.JsonConverters.RunObjectIncompleteDetailsReasonJsonConverter), - typeof(global::G.JsonConverters.RunObjectIncompleteDetailsReasonNullableJsonConverter), - typeof(global::G.JsonConverters.RunObjectToolDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.RunObjectToolDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.CreateRunRequestModelJsonConverter), - typeof(global::G.JsonConverters.CreateRunRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionFunctionResponseChoiceFinishReasonJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionFunctionResponseChoiceFinishReasonNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionFunctionResponseObjectJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionFunctionResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.PredictionContentTypeJsonConverter), + typeof(global::G.JsonConverters.PredictionContentTypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestAudioVoiceJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestAudioVoiceNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestAudioFormatJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestAudioFormatNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestResponseFormatDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestResponseFormatDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestServiceTierJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestServiceTierNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestFunctionCallJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionRequestFunctionCallNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionResponseChoiceFinishReasonJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionResponseChoiceFinishReasonNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionResponseServiceTierJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionResponseServiceTierNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionResponseObjectJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseChoiceFinishReasonJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseChoiceFinishReasonNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseServiceTierJsonConverter), + 
typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseServiceTierNullableJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseObjectJsonConverter), + typeof(global::G.JsonConverters.CreateChatCompletionStreamResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.CreateCompletionRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateCompletionRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateCompletionResponseChoiceFinishReasonJsonConverter), + typeof(global::G.JsonConverters.CreateCompletionResponseChoiceFinishReasonNullableJsonConverter), + typeof(global::G.JsonConverters.CreateCompletionResponseObjectJsonConverter), + typeof(global::G.JsonConverters.CreateCompletionResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.CreateEmbeddingRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateEmbeddingRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateEmbeddingRequestEncodingFormatJsonConverter), + typeof(global::G.JsonConverters.CreateEmbeddingRequestEncodingFormatNullableJsonConverter), + typeof(global::G.JsonConverters.EmbeddingObjectJsonConverter), + typeof(global::G.JsonConverters.EmbeddingObjectNullableJsonConverter), + typeof(global::G.JsonConverters.CreateEmbeddingResponseObjectJsonConverter), + typeof(global::G.JsonConverters.CreateEmbeddingResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.CreateFileRequestPurposeJsonConverter), + typeof(global::G.JsonConverters.CreateFileRequestPurposeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersBatchSizeJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersBatchSizeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersLearningRateMultiplierJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersLearningRateMultiplierNullableJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestHyperparametersNEpochsNullableJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestIntegrationTypeJsonConverter), + typeof(global::G.JsonConverters.CreateFineTuningJobRequestIntegrationTypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageEditRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateImageEditRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageEditRequestSizeJsonConverter), + typeof(global::G.JsonConverters.CreateImageEditRequestSizeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageEditRequestResponseFormatJsonConverter), + typeof(global::G.JsonConverters.CreateImageEditRequestResponseFormatNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestQualityJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestQualityNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestResponseFormatJsonConverter), + 
typeof(global::G.JsonConverters.CreateImageRequestResponseFormatNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestSizeJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestSizeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestStyleJsonConverter), + typeof(global::G.JsonConverters.CreateImageRequestStyleNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageVariationRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateImageVariationRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageVariationRequestResponseFormatJsonConverter), + typeof(global::G.JsonConverters.CreateImageVariationRequestResponseFormatNullableJsonConverter), + typeof(global::G.JsonConverters.CreateImageVariationRequestSizeJsonConverter), + typeof(global::G.JsonConverters.CreateImageVariationRequestSizeNullableJsonConverter), typeof(global::G.JsonConverters.CreateMessageRequestRoleJsonConverter), typeof(global::G.JsonConverters.CreateMessageRequestRoleNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentImageFileObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageContentImageFileObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentImageFileObjectImageFileDetailJsonConverter), - typeof(global::G.JsonConverters.MessageContentImageFileObjectImageFileDetailNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentImageUrlObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageContentImageUrlObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentImageUrlObjectImageUrlDetailJsonConverter), - typeof(global::G.JsonConverters.MessageContentImageUrlObjectImageUrlDetailNullableJsonConverter), typeof(global::G.JsonConverters.MessageRequestContentTextObjectTypeJsonConverter), typeof(global::G.JsonConverters.MessageRequestContentTextObjectTypeNullableJsonConverter), typeof(global::G.JsonConverters.CreateMessageRequestContentVariant2ItemDiscriminatorTypeJsonConverter), typeof(global::G.JsonConverters.CreateMessageRequestContentVariant2ItemDiscriminatorTypeNullableJsonConverter), typeof(global::G.JsonConverters.CreateMessageRequestAttachmentToolDiscriminatorTypeJsonConverter), typeof(global::G.JsonConverters.CreateMessageRequestAttachmentToolDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeJsonConverter), + typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemVariant1TypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemVariant2TypeJsonConverter), + typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemVariant2TypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.CreateModerationRequestInputVariant3ItemDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateModerationRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateItemNullableJsonConverter), + 
typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstructionNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinorNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemJsonConverter), + typeof(global::G.JsonConverters.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItemNullableJsonConverter), + typeof(global::G.JsonConverters.CreateRunRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateRunRequestModelNullableJsonConverter), typeof(global::G.JsonConverters.CreateRunRequestToolDiscriminatorTypeJsonConverter), typeof(global::G.JsonConverters.CreateRunRequestToolDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateSpeechRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateSpeechRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateSpeechRequestVoiceJsonConverter), + 
typeof(global::G.JsonConverters.CreateSpeechRequestVoiceNullableJsonConverter), + typeof(global::G.JsonConverters.CreateSpeechRequestResponseFormatJsonConverter), + typeof(global::G.JsonConverters.CreateSpeechRequestResponseFormatNullableJsonConverter), typeof(global::G.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeJsonConverter), typeof(global::G.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1TypeNullableJsonConverter), typeof(global::G.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2TypeJsonConverter), @@ -265,122 +435,76 @@ internal sealed partial class JsonSerializerContextConverters typeof(global::G.JsonConverters.CreateThreadAndRunRequestModelNullableJsonConverter), typeof(global::G.JsonConverters.CreateThreadAndRunRequestToolDiscriminatorTypeJsonConverter), typeof(global::G.JsonConverters.CreateThreadAndRunRequestToolDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ThreadObjectObjectJsonConverter), - typeof(global::G.JsonConverters.ThreadObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.DeleteThreadResponseObjectJsonConverter), - typeof(global::G.JsonConverters.DeleteThreadResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.MessageObjectObjectJsonConverter), - typeof(global::G.JsonConverters.MessageObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.MessageObjectStatusJsonConverter), - typeof(global::G.JsonConverters.MessageObjectStatusNullableJsonConverter), - typeof(global::G.JsonConverters.MessageObjectIncompleteDetailsReasonJsonConverter), - typeof(global::G.JsonConverters.MessageObjectIncompleteDetailsReasonNullableJsonConverter), - typeof(global::G.JsonConverters.MessageObjectRoleJsonConverter), - typeof(global::G.JsonConverters.MessageObjectRoleNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentTextObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageContentTextObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentTextAnnotationsFileCitationObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageContentTextAnnotationsFileCitationObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentTextAnnotationsFilePathObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageContentTextAnnotationsFilePathObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentTextObjectTextAnnotationDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.MessageContentTextObjectTextAnnotationDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageContentRefusalObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageContentRefusalObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageObjectContentItemDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.MessageObjectContentItemDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageObjectAttachmentToolDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.MessageObjectAttachmentToolDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaObjectObjectJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaObjectDeltaRoleJsonConverter), - 
typeof(global::G.JsonConverters.MessageDeltaObjectDeltaRoleNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentImageFileObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentImageFileObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentImageFileObjectImageFileDetailJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentImageFileObjectImageFileDetailNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentTextObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentTextObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentTextAnnotationsFileCitationObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentTextAnnotationsFileCitationObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentTextAnnotationsFilePathObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentTextAnnotationsFilePathObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentTextObjectTextAnnotationDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentTextObjectTextAnnotationDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentRefusalObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentRefusalObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentImageUrlObjectTypeJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentImageUrlObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentImageUrlObjectImageUrlDetailJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaContentImageUrlObjectImageUrlDetailNullableJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaObjectDeltaContentItemDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.MessageDeltaObjectDeltaContentItemDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.DeleteMessageResponseObjectJsonConverter), - typeof(global::G.JsonConverters.DeleteMessageResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectObjectJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectStatusJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectStatusNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsMessageCreationObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsMessageCreationObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeOutputLogsObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeOutputLogsObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeOutputImageObjectTypeJsonConverter), - 
typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeOutputImageObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsFileSearchObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsFunctionObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsFunctionObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsObjectToolCallDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsObjectToolCallDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectStepDetailsDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectStepDetailsDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectLastErrorCodeJsonConverter), - typeof(global::G.JsonConverters.RunStepObjectLastErrorCodeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaObjectObjectJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsMessageCreationObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsMessageCreationObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsFileSearchObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsFileSearchObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsFunctionObjectTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsFunctionObjectTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaObjectDeltaStepDetailsDiscriminatorTypeJsonConverter), - 
typeof(global::G.JsonConverters.RunStepDeltaObjectDeltaStepDetailsDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.CreateTranscriptionRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateTranscriptionRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateTranscriptionRequestTimestampGranularitieJsonConverter), + typeof(global::G.JsonConverters.CreateTranscriptionRequestTimestampGranularitieNullableJsonConverter), + typeof(global::G.JsonConverters.CreateTranslationRequestModelJsonConverter), + typeof(global::G.JsonConverters.CreateTranslationRequestModelNullableJsonConverter), + typeof(global::G.JsonConverters.CreateUploadRequestPurposeJsonConverter), + typeof(global::G.JsonConverters.CreateUploadRequestPurposeNullableJsonConverter), typeof(global::G.JsonConverters.VectorStoreExpirationAfterAnchorJsonConverter), typeof(global::G.JsonConverters.VectorStoreExpirationAfterAnchorNullableJsonConverter), - typeof(global::G.JsonConverters.VectorStoreObjectObjectJsonConverter), - typeof(global::G.JsonConverters.VectorStoreObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.VectorStoreObjectStatusJsonConverter), - typeof(global::G.JsonConverters.VectorStoreObjectStatusNullableJsonConverter), - typeof(global::G.JsonConverters.AutoChunkingStrategyRequestParamTypeJsonConverter), - typeof(global::G.JsonConverters.AutoChunkingStrategyRequestParamTypeNullableJsonConverter), - typeof(global::G.JsonConverters.StaticChunkingStrategyRequestParamTypeJsonConverter), - typeof(global::G.JsonConverters.StaticChunkingStrategyRequestParamTypeNullableJsonConverter), typeof(global::G.JsonConverters.CreateVectorStoreRequestChunkingStrategyDiscriminatorTypeJsonConverter), typeof(global::G.JsonConverters.CreateVectorStoreRequestChunkingStrategyDiscriminatorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.DeleteAssistantResponseObjectJsonConverter), + typeof(global::G.JsonConverters.DeleteAssistantResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.DeleteFileResponseObjectJsonConverter), + typeof(global::G.JsonConverters.DeleteFileResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.DeleteMessageResponseObjectJsonConverter), + typeof(global::G.JsonConverters.DeleteMessageResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.DeleteThreadResponseObjectJsonConverter), + typeof(global::G.JsonConverters.DeleteThreadResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.DeleteVectorStoreFileResponseObjectJsonConverter), + typeof(global::G.JsonConverters.DeleteVectorStoreFileResponseObjectNullableJsonConverter), typeof(global::G.JsonConverters.DeleteVectorStoreResponseObjectJsonConverter), typeof(global::G.JsonConverters.DeleteVectorStoreResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.FineTuningIntegrationTypeJsonConverter), + typeof(global::G.JsonConverters.FineTuningIntegrationTypeNullableJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobHyperparametersNEpochsJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobHyperparametersNEpochsNullableJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobObjectJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobObjectNullableJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobStatusJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobStatusNullableJsonConverter), + 
typeof(global::G.JsonConverters.FineTuningJobCheckpointObjectJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobCheckpointObjectNullableJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobEventLevelJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobEventLevelNullableJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobEventObjectJsonConverter), + typeof(global::G.JsonConverters.FineTuningJobEventObjectNullableJsonConverter), + typeof(global::G.JsonConverters.InviteObjectJsonConverter), + typeof(global::G.JsonConverters.InviteObjectNullableJsonConverter), + typeof(global::G.JsonConverters.InviteRoleJsonConverter), + typeof(global::G.JsonConverters.InviteRoleNullableJsonConverter), + typeof(global::G.JsonConverters.InviteStatusJsonConverter), + typeof(global::G.JsonConverters.InviteStatusNullableJsonConverter), + typeof(global::G.JsonConverters.InviteDeleteResponseObjectJsonConverter), + typeof(global::G.JsonConverters.InviteDeleteResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.InviteListResponseObjectJsonConverter), + typeof(global::G.JsonConverters.InviteListResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.InviteRequestRoleJsonConverter), + typeof(global::G.JsonConverters.InviteRequestRoleNullableJsonConverter), + typeof(global::G.JsonConverters.ListAuditLogsResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ListAuditLogsResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ListBatchesResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ListBatchesResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.OpenAIFileObjectJsonConverter), + typeof(global::G.JsonConverters.OpenAIFileObjectNullableJsonConverter), + typeof(global::G.JsonConverters.OpenAIFilePurposeJsonConverter), + typeof(global::G.JsonConverters.OpenAIFilePurposeNullableJsonConverter), + typeof(global::G.JsonConverters.OpenAIFileStatusJsonConverter), + typeof(global::G.JsonConverters.OpenAIFileStatusNullableJsonConverter), + typeof(global::G.JsonConverters.ListFineTuningJobCheckpointsResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ListFineTuningJobCheckpointsResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ListFineTuningJobEventsResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ListFineTuningJobEventsResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ListModelsResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ListModelsResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ModelObjectJsonConverter), + typeof(global::G.JsonConverters.ModelObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ListPaginatedFineTuningJobsResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ListPaginatedFineTuningJobsResponseObjectNullableJsonConverter), typeof(global::G.JsonConverters.VectorStoreFileObjectObjectJsonConverter), typeof(global::G.JsonConverters.VectorStoreFileObjectObjectNullableJsonConverter), typeof(global::G.JsonConverters.VectorStoreFileObjectStatusJsonConverter), @@ -393,232 +517,338 @@ internal sealed partial class JsonSerializerContextConverters typeof(global::G.JsonConverters.OtherChunkingStrategyResponseParamTypeNullableJsonConverter), typeof(global::G.JsonConverters.VectorStoreFileObjectChunkingStrategyDiscriminatorTypeJsonConverter), typeof(global::G.JsonConverters.VectorStoreFileObjectChunkingStrategyDiscriminatorTypeNullableJsonConverter), - 
typeof(global::G.JsonConverters.ChunkingStrategyRequestParamDiscriminatorTypeJsonConverter), - typeof(global::G.JsonConverters.ChunkingStrategyRequestParamDiscriminatorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.DeleteVectorStoreFileResponseObjectJsonConverter), - typeof(global::G.JsonConverters.DeleteVectorStoreFileResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.VectorStoreFileBatchObjectObjectJsonConverter), - typeof(global::G.JsonConverters.VectorStoreFileBatchObjectObjectNullableJsonConverter), - typeof(global::G.JsonConverters.VectorStoreFileBatchObjectStatusJsonConverter), - typeof(global::G.JsonConverters.VectorStoreFileBatchObjectStatusNullableJsonConverter), - typeof(global::G.JsonConverters.ErrorEventEventJsonConverter), - typeof(global::G.JsonConverters.ErrorEventEventNullableJsonConverter), - typeof(global::G.JsonConverters.DoneEventEventJsonConverter), - typeof(global::G.JsonConverters.DoneEventEventNullableJsonConverter), - typeof(global::G.JsonConverters.DoneEventDataJsonConverter), - typeof(global::G.JsonConverters.DoneEventDataNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant3EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant3EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant4EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant4EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant5EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant5EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant6EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant6EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant7EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant7EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant8EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant8EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant9EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant9EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant10EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant10EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant11EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant11EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant12EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant12EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant13EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant13EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant14EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant14EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant15EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant15EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant16EventJsonConverter), - 
typeof(global::G.JsonConverters.AssistantStreamEventVariant16EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant17EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant17EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant18EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant18EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant19EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant19EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant20EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant20EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant21EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant21EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant22EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant22EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant23EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant23EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant24EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant24EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant25EventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventVariant25EventNullableJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventDiscriminatorEventJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventDiscriminatorEventNullableJsonConverter), - typeof(global::G.JsonConverters.BatchObjectJsonConverter), - typeof(global::G.JsonConverters.BatchObjectNullableJsonConverter), - typeof(global::G.JsonConverters.BatchStatusJsonConverter), - typeof(global::G.JsonConverters.BatchStatusNullableJsonConverter), - typeof(global::G.JsonConverters.BatchRequestInputMethodJsonConverter), - typeof(global::G.JsonConverters.BatchRequestInputMethodNullableJsonConverter), - typeof(global::G.JsonConverters.ListBatchesResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ListBatchesResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.AuditLogActorApiKeyTypeJsonConverter), - typeof(global::G.JsonConverters.AuditLogActorApiKeyTypeNullableJsonConverter), - typeof(global::G.JsonConverters.AuditLogActorTypeJsonConverter), - typeof(global::G.JsonConverters.AuditLogActorTypeNullableJsonConverter), - typeof(global::G.JsonConverters.AuditLogEventTypeJsonConverter), - typeof(global::G.JsonConverters.AuditLogEventTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ListAuditLogsResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ListAuditLogsResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.InviteObjectJsonConverter), - typeof(global::G.JsonConverters.InviteObjectNullableJsonConverter), - typeof(global::G.JsonConverters.InviteRoleJsonConverter), - typeof(global::G.JsonConverters.InviteRoleNullableJsonConverter), - typeof(global::G.JsonConverters.InviteStatusJsonConverter), - typeof(global::G.JsonConverters.InviteStatusNullableJsonConverter), - typeof(global::G.JsonConverters.InviteListResponseObjectJsonConverter), - 
typeof(global::G.JsonConverters.InviteListResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.InviteRequestRoleJsonConverter), - typeof(global::G.JsonConverters.InviteRequestRoleNullableJsonConverter), - typeof(global::G.JsonConverters.InviteDeleteResponseObjectJsonConverter), - typeof(global::G.JsonConverters.InviteDeleteResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.UserObjectJsonConverter), - typeof(global::G.JsonConverters.UserObjectNullableJsonConverter), - typeof(global::G.JsonConverters.UserRoleJsonConverter), - typeof(global::G.JsonConverters.UserRoleNullableJsonConverter), - typeof(global::G.JsonConverters.UserListResponseObjectJsonConverter), - typeof(global::G.JsonConverters.UserListResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.UserRoleUpdateRequestRoleJsonConverter), - typeof(global::G.JsonConverters.UserRoleUpdateRequestRoleNullableJsonConverter), - typeof(global::G.JsonConverters.UserDeleteResponseObjectJsonConverter), - typeof(global::G.JsonConverters.UserDeleteResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.VectorStoreObjectObjectJsonConverter), + typeof(global::G.JsonConverters.VectorStoreObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.VectorStoreObjectStatusJsonConverter), + typeof(global::G.JsonConverters.VectorStoreObjectStatusNullableJsonConverter), + typeof(global::G.JsonConverters.ModifyAssistantRequestToolDiscriminatorTypeJsonConverter), + typeof(global::G.JsonConverters.ModifyAssistantRequestToolDiscriminatorTypeNullableJsonConverter), typeof(global::G.JsonConverters.ProjectObjectJsonConverter), typeof(global::G.JsonConverters.ProjectObjectNullableJsonConverter), typeof(global::G.JsonConverters.ProjectStatusJsonConverter), typeof(global::G.JsonConverters.ProjectStatusNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectListResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ProjectListResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectApiKeyObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectApiKeyObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectApiKeyOwnerTypeJsonConverter), + typeof(global::G.JsonConverters.ProjectApiKeyOwnerTypeNullableJsonConverter), typeof(global::G.JsonConverters.ProjectUserObjectJsonConverter), typeof(global::G.JsonConverters.ProjectUserObjectNullableJsonConverter), typeof(global::G.JsonConverters.ProjectUserRoleJsonConverter), typeof(global::G.JsonConverters.ProjectUserRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectUserCreateRequestRoleJsonConverter), - typeof(global::G.JsonConverters.ProjectUserCreateRequestRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectUserUpdateRequestRoleJsonConverter), - typeof(global::G.JsonConverters.ProjectUserUpdateRequestRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectUserDeleteResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ProjectUserDeleteResponseObjectNullableJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountObjectJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountObjectNullableJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountRoleJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectServiceAccountListResponseObjectJsonConverter), - 
typeof(global::G.JsonConverters.ProjectServiceAccountListResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectApiKeyDeleteResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectApiKeyDeleteResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectApiKeyListResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectApiKeyListResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectListResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectListResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectRateLimitObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectRateLimitObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectRateLimitListResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectRateLimitListResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectServiceAccountApiKeyObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectServiceAccountApiKeyObjectNullableJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountCreateResponseObjectJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountCreateResponseObjectNullableJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountCreateResponseRoleJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountCreateResponseRoleNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectServiceAccountApiKeyObjectJsonConverter), - typeof(global::G.JsonConverters.ProjectServiceAccountApiKeyObjectNullableJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountDeleteResponseObjectJsonConverter), typeof(global::G.JsonConverters.ProjectServiceAccountDeleteResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectApiKeyObjectJsonConverter), - typeof(global::G.JsonConverters.ProjectApiKeyObjectNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectApiKeyOwnerTypeJsonConverter), - typeof(global::G.JsonConverters.ProjectApiKeyOwnerTypeNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectApiKeyListResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ProjectApiKeyListResponseObjectNullableJsonConverter), - typeof(global::G.JsonConverters.ProjectApiKeyDeleteResponseObjectJsonConverter), - typeof(global::G.JsonConverters.ProjectApiKeyDeleteResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectServiceAccountListResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectServiceAccountListResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectUserCreateRequestRoleJsonConverter), + typeof(global::G.JsonConverters.ProjectUserCreateRequestRoleNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectUserDeleteResponseObjectJsonConverter), + typeof(global::G.JsonConverters.ProjectUserDeleteResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.ProjectUserUpdateRequestRoleJsonConverter), + typeof(global::G.JsonConverters.ProjectUserUpdateRequestRoleNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventConversationItemCreateTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventConversationItemCreateTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemTypeNullableJsonConverter), + 
typeof(global::G.JsonConverters.RealtimeConversationItemObjectJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemObjectNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemStatusJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemStatusNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemRoleJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemRoleNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemContentItemTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeConversationItemContentItemTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventConversationItemDeleteTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventConversationItemDeleteTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventConversationItemTruncateTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventConversationItemTruncateTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferAppendTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferAppendTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferClearTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferClearTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferCommitTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventInputAudioBufferCommitTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventResponseCancelTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventResponseCancelTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventResponseCreateTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventResponseCreateTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeSessionModalitieJsonConverter), + typeof(global::G.JsonConverters.RealtimeSessionModalitieNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeSessionVoiceJsonConverter), + typeof(global::G.JsonConverters.RealtimeSessionVoiceNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeSessionToolTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeSessionToolTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeSessionMaxResponseOutputTokensJsonConverter), + typeof(global::G.JsonConverters.RealtimeSessionMaxResponseOutputTokensNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventSessionUpdateTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeClientEventSessionUpdateTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeResponseObjectJsonConverter), + typeof(global::G.JsonConverters.RealtimeResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeResponseStatusJsonConverter), + typeof(global::G.JsonConverters.RealtimeResponseStatusNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeResponseStatusDetailsTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeResponseStatusDetailsTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeResponseStatusDetailsReasonJsonConverter), + typeof(global::G.JsonConverters.RealtimeResponseStatusDetailsReasonNullableJsonConverter), + 
typeof(global::G.JsonConverters.RealtimeServerEventConversationCreatedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationCreatedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemCreatedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemCreatedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemDeletedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemDeletedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemInputAudioTranscriptionFailedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemTruncatedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventConversationItemTruncatedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventErrorTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventErrorTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferClearedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferClearedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferCommittedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferCommittedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferSpeechStartedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventInputAudioBufferSpeechStoppedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventRateLimitsUpdatedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioDeltaTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioDeltaTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioDoneTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioDoneTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioTranscriptDeltaTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseAudioTranscriptDoneTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartAddedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartAddedTypeNullableJsonConverter), + 
typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartAddedPartTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartAddedPartTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartDoneTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseContentPartDoneTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseCreatedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseCreatedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseDoneTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseDoneTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDeltaTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseFunctionCallArgumentsDoneTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseOutputItemAddedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseOutputItemAddedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseOutputItemDoneTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseOutputItemDoneTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseTextDeltaTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseTextDeltaTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseTextDoneTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventResponseTextDoneTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventSessionCreatedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventSessionCreatedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventSessionUpdatedTypeJsonConverter), + typeof(global::G.JsonConverters.RealtimeServerEventSessionUpdatedTypeNullableJsonConverter), + typeof(global::G.JsonConverters.UploadStatusJsonConverter), + typeof(global::G.JsonConverters.UploadStatusNullableJsonConverter), + typeof(global::G.JsonConverters.UploadObjectJsonConverter), + typeof(global::G.JsonConverters.UploadObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UploadPartObjectJsonConverter), + typeof(global::G.JsonConverters.UploadPartObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageAudioSpeechesResultObjectJsonConverter), + typeof(global::G.JsonConverters.UsageAudioSpeechesResultObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageAudioTranscriptionsResultObjectJsonConverter), + typeof(global::G.JsonConverters.UsageAudioTranscriptionsResultObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageCodeInterpreterSessionsResultObjectJsonConverter), + typeof(global::G.JsonConverters.UsageCodeInterpreterSessionsResultObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageCompletionsResultObjectJsonConverter), + typeof(global::G.JsonConverters.UsageCompletionsResultObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageEmbeddingsResultObjectJsonConverter), + 
typeof(global::G.JsonConverters.UsageEmbeddingsResultObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageImagesResultObjectJsonConverter), + typeof(global::G.JsonConverters.UsageImagesResultObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageModerationsResultObjectJsonConverter), + typeof(global::G.JsonConverters.UsageModerationsResultObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageResponseObjectJsonConverter), + typeof(global::G.JsonConverters.UsageResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageTimeBucketObjectJsonConverter), + typeof(global::G.JsonConverters.UsageTimeBucketObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageVectorStoresResultObjectJsonConverter), + typeof(global::G.JsonConverters.UsageVectorStoresResultObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UsageTimeBucketResultItemDiscriminatorObjectJsonConverter), + typeof(global::G.JsonConverters.UsageTimeBucketResultItemDiscriminatorObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UserObjectJsonConverter), + typeof(global::G.JsonConverters.UserObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UserRoleJsonConverter), + typeof(global::G.JsonConverters.UserRoleNullableJsonConverter), + typeof(global::G.JsonConverters.UserDeleteResponseObjectJsonConverter), + typeof(global::G.JsonConverters.UserDeleteResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UserListResponseObjectJsonConverter), + typeof(global::G.JsonConverters.UserListResponseObjectNullableJsonConverter), + typeof(global::G.JsonConverters.UserRoleUpdateRequestRoleJsonConverter), + typeof(global::G.JsonConverters.UserRoleUpdateRequestRoleNullableJsonConverter), + typeof(global::G.JsonConverters.VectorStoreFileBatchObjectObjectJsonConverter), + typeof(global::G.JsonConverters.VectorStoreFileBatchObjectObjectNullableJsonConverter), + typeof(global::G.JsonConverters.VectorStoreFileBatchObjectStatusJsonConverter), + typeof(global::G.JsonConverters.VectorStoreFileBatchObjectStatusNullableJsonConverter), typeof(global::G.JsonConverters.CreateBatchRequestEndpointJsonConverter), typeof(global::G.JsonConverters.CreateBatchRequestEndpointNullableJsonConverter), typeof(global::G.JsonConverters.CreateBatchRequestCompletionWindowJsonConverter), typeof(global::G.JsonConverters.CreateBatchRequestCompletionWindowNullableJsonConverter), typeof(global::G.JsonConverters.ListAssistantsOrderJsonConverter), typeof(global::G.JsonConverters.ListAssistantsOrderNullableJsonConverter), + typeof(global::G.JsonConverters.ListFilesOrderJsonConverter), + typeof(global::G.JsonConverters.ListFilesOrderNullableJsonConverter), + typeof(global::G.JsonConverters.UsageCostsBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageCostsBucketWidthNullableJsonConverter), + typeof(global::G.JsonConverters.UsageCostsGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageCostsGroupByItemNullableJsonConverter), + typeof(global::G.JsonConverters.UsageAudioSpeechesBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageAudioSpeechesBucketWidthNullableJsonConverter), + typeof(global::G.JsonConverters.UsageAudioSpeechesGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageAudioSpeechesGroupByItemNullableJsonConverter), + typeof(global::G.JsonConverters.UsageAudioTranscriptionsBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageAudioTranscriptionsBucketWidthNullableJsonConverter), + 
typeof(global::G.JsonConverters.UsageAudioTranscriptionsGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageAudioTranscriptionsGroupByItemNullableJsonConverter), + typeof(global::G.JsonConverters.UsageCodeInterpreterSessionsBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageCodeInterpreterSessionsBucketWidthNullableJsonConverter), + typeof(global::G.JsonConverters.UsageCodeInterpreterSessionsGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageCodeInterpreterSessionsGroupByItemNullableJsonConverter), + typeof(global::G.JsonConverters.UsageCompletionsBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageCompletionsBucketWidthNullableJsonConverter), + typeof(global::G.JsonConverters.UsageCompletionsGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageCompletionsGroupByItemNullableJsonConverter), + typeof(global::G.JsonConverters.UsageEmbeddingsBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageEmbeddingsBucketWidthNullableJsonConverter), + typeof(global::G.JsonConverters.UsageEmbeddingsGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageEmbeddingsGroupByItemNullableJsonConverter), + typeof(global::G.JsonConverters.UsageImagesBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageImagesBucketWidthNullableJsonConverter), + typeof(global::G.JsonConverters.UsageImagesSourceJsonConverter), + typeof(global::G.JsonConverters.UsageImagesSourceNullableJsonConverter), + typeof(global::G.JsonConverters.UsageImagesSizeJsonConverter), + typeof(global::G.JsonConverters.UsageImagesSizeNullableJsonConverter), + typeof(global::G.JsonConverters.UsageImagesGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageImagesGroupByItemNullableJsonConverter), + typeof(global::G.JsonConverters.UsageModerationsBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageModerationsBucketWidthNullableJsonConverter), + typeof(global::G.JsonConverters.UsageModerationsGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageModerationsGroupByItemNullableJsonConverter), + typeof(global::G.JsonConverters.UsageVectorStoresBucketWidthJsonConverter), + typeof(global::G.JsonConverters.UsageVectorStoresBucketWidthNullableJsonConverter), + typeof(global::G.JsonConverters.UsageVectorStoresGroupByItemJsonConverter), + typeof(global::G.JsonConverters.UsageVectorStoresGroupByItemNullableJsonConverter), typeof(global::G.JsonConverters.ListMessagesOrderJsonConverter), typeof(global::G.JsonConverters.ListMessagesOrderNullableJsonConverter), typeof(global::G.JsonConverters.ListRunsOrderJsonConverter), typeof(global::G.JsonConverters.ListRunsOrderNullableJsonConverter), + typeof(global::G.JsonConverters.CreateRunIncludeItemJsonConverter), + typeof(global::G.JsonConverters.CreateRunIncludeItemNullableJsonConverter), typeof(global::G.JsonConverters.ListRunStepsOrderJsonConverter), typeof(global::G.JsonConverters.ListRunStepsOrderNullableJsonConverter), + typeof(global::G.JsonConverters.ListRunStepsIncludeItemJsonConverter), + typeof(global::G.JsonConverters.ListRunStepsIncludeItemNullableJsonConverter), + typeof(global::G.JsonConverters.GetRunStepIncludeItemJsonConverter), + typeof(global::G.JsonConverters.GetRunStepIncludeItemNullableJsonConverter), typeof(global::G.JsonConverters.ListVectorStoresOrderJsonConverter), typeof(global::G.JsonConverters.ListVectorStoresOrderNullableJsonConverter), - typeof(global::G.JsonConverters.ListVectorStoreFilesOrderJsonConverter), - 
typeof(global::G.JsonConverters.ListVectorStoreFilesOrderNullableJsonConverter), - typeof(global::G.JsonConverters.ListVectorStoreFilesFilterJsonConverter), - typeof(global::G.JsonConverters.ListVectorStoreFilesFilterNullableJsonConverter), typeof(global::G.JsonConverters.ListFilesInVectorStoreBatchOrderJsonConverter), typeof(global::G.JsonConverters.ListFilesInVectorStoreBatchOrderNullableJsonConverter), typeof(global::G.JsonConverters.ListFilesInVectorStoreBatchFilterJsonConverter), typeof(global::G.JsonConverters.ListFilesInVectorStoreBatchFilterNullableJsonConverter), + typeof(global::G.JsonConverters.ListVectorStoreFilesOrderJsonConverter), + typeof(global::G.JsonConverters.ListVectorStoreFilesOrderNullableJsonConverter), + typeof(global::G.JsonConverters.ListVectorStoreFilesFilterJsonConverter), + typeof(global::G.JsonConverters.ListVectorStoreFilesFilterNullableJsonConverter), + typeof(global::G.JsonConverters.ToolsItemJsonConverter), + typeof(global::G.JsonConverters.AssistantsApiResponseFormatOptionJsonConverter), + typeof(global::G.JsonConverters.AssistantStreamEventJsonConverter), + typeof(global::G.JsonConverters.ToolsItem8JsonConverter), + typeof(global::G.JsonConverters.AssistantsApiToolChoiceOptionJsonConverter), + typeof(global::G.JsonConverters.RunStepObjectStepDetailsJsonConverter), + typeof(global::G.JsonConverters.ToolCallsItem2JsonConverter), + typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaObjectDeltaStepDetailsJsonConverter), + typeof(global::G.JsonConverters.ToolCallsItemJsonConverter), + typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputJsonConverter), + typeof(global::G.JsonConverters.ContentItem2JsonConverter), + typeof(global::G.JsonConverters.AnnotationsItemJsonConverter), + typeof(global::G.JsonConverters.ToolsItem6JsonConverter), + typeof(global::G.JsonConverters.ContentItemJsonConverter), + typeof(global::G.JsonConverters.AnnotationsItem2JsonConverter), + typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageContentPartJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestMessageJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestSystemMessageContentPartJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestUserMessageContentPartJsonConverter), - typeof(global::G.JsonConverters.ChatCompletionRequestAssistantMessageContentPartJsonConverter), typeof(global::G.JsonConverters.ChatCompletionRequestToolMessageContentPartJsonConverter), - typeof(global::G.JsonConverters.FineTuneChatCompletionRequestAssistantMessageJsonConverter), typeof(global::G.JsonConverters.ChatCompletionToolChoiceOptionJsonConverter), - typeof(global::G.JsonConverters.ResponseFormatJsonConverter), - typeof(global::G.JsonConverters.AssistantsApiResponseFormatOptionJsonConverter), - typeof(global::G.JsonConverters.ToolsItemJsonConverter), + typeof(global::G.JsonConverters.ChunkingStrategyRequestParamJsonConverter), typeof(global::G.JsonConverters.ToolsItem2JsonConverter), typeof(global::G.JsonConverters.CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyJsonConverter), + typeof(global::G.JsonConverters.ResponseFormatJsonConverter), + typeof(global::G.JsonConverters.ContentVariant2ItemJsonConverter), typeof(global::G.JsonConverters.ToolsItem3JsonConverter), - typeof(global::G.JsonConverters.AssistantsApiToolChoiceOptionJsonConverter), + 
typeof(global::G.JsonConverters.InputVariant3ItemJsonConverter), typeof(global::G.JsonConverters.ToolsItem4JsonConverter), - typeof(global::G.JsonConverters.ContentVariant2ItemJsonConverter), - typeof(global::G.JsonConverters.ToolsItem8JsonConverter), - typeof(global::G.JsonConverters.ToolsItem5JsonConverter), typeof(global::G.JsonConverters.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyJsonConverter), - typeof(global::G.JsonConverters.ToolsItem6JsonConverter), - typeof(global::G.JsonConverters.ContentItemJsonConverter), - typeof(global::G.JsonConverters.AnnotationsItemJsonConverter), - typeof(global::G.JsonConverters.ToolsItem7JsonConverter), - typeof(global::G.JsonConverters.ContentItem2JsonConverter), - typeof(global::G.JsonConverters.AnnotationsItem2JsonConverter), - typeof(global::G.JsonConverters.RunStepObjectStepDetailsJsonConverter), - typeof(global::G.JsonConverters.ToolCallsItemJsonConverter), - typeof(global::G.JsonConverters.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputJsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaObjectDeltaStepDetailsJsonConverter), - typeof(global::G.JsonConverters.ToolCallsItem2JsonConverter), - typeof(global::G.JsonConverters.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputJsonConverter), + typeof(global::G.JsonConverters.ToolsItem5JsonConverter), typeof(global::G.JsonConverters.CreateVectorStoreRequestChunkingStrategyJsonConverter), + typeof(global::G.JsonConverters.FineTuneChatCompletionRequestAssistantMessageJsonConverter), typeof(global::G.JsonConverters.VectorStoreFileObjectChunkingStrategyJsonConverter), - typeof(global::G.JsonConverters.ChunkingStrategyRequestParamJsonConverter), - typeof(global::G.JsonConverters.AssistantStreamEventJsonConverter), - typeof(global::G.JsonConverters.AnyOfJsonConverter), - typeof(global::G.JsonConverters.OneOfJsonConverter, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>), - typeof(global::G.JsonConverters.OneOfJsonConverter>), + typeof(global::G.JsonConverters.ToolsItem7JsonConverter), + typeof(global::G.JsonConverters.ResultItemJsonConverter), + typeof(global::G.JsonConverters.OneOfJsonConverter>), typeof(global::G.JsonConverters.OneOfJsonConverter>), typeof(global::G.JsonConverters.OneOfJsonConverter>), - typeof(global::G.JsonConverters.OneOfJsonConverter>), typeof(global::G.JsonConverters.OneOfJsonConverter>), + typeof(global::G.JsonConverters.AnyOfJsonConverter), typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.OneOfJsonConverter>), typeof(global::G.JsonConverters.OneOfJsonConverter>), typeof(global::G.JsonConverters.OneOfJsonConverter), - typeof(global::G.JsonConverters.OneOfJsonConverter), - typeof(global::G.JsonConverters.OneOfJsonConverter), - typeof(global::G.JsonConverters.AnyOfJsonConverter), - typeof(global::G.JsonConverters.AnyOfJsonConverter), - typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.OneOfJsonConverter, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>), typeof(global::G.JsonConverters.OneOfJsonConverter>), - typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.OneOfJsonConverter, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>), + typeof(global::G.JsonConverters.AnyOfJsonConverter), typeof(global::G.JsonConverters.AnyOfJsonConverter), 
typeof(global::G.JsonConverters.OneOfJsonConverter), typeof(global::G.JsonConverters.OneOfJsonConverter), typeof(global::G.JsonConverters.OneOfJsonConverter), - typeof(global::G.JsonConverters.OneOfJsonConverter, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>), - typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.OneOfJsonConverter>), + typeof(global::G.JsonConverters.OneOfJsonConverter, global::System.Collections.Generic.IList>), + typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.AnyOfJsonConverter), typeof(global::G.JsonConverters.AnyOfJsonConverter), typeof(global::G.JsonConverters.AnyOfJsonConverter), - typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.OneOfJsonConverter), + typeof(global::G.JsonConverters.OneOfJsonConverter), typeof(global::G.JsonConverters.OneOfJsonConverter), - typeof(global::G.JsonConverters.AnyOfJsonConverter), - typeof(global::G.JsonConverters.AnyOfJsonConverter), - typeof(global::G.JsonConverters.OneOfJsonConverter>), - typeof(global::G.JsonConverters.AnyOfJsonConverter), + typeof(global::G.JsonConverters.OneOfJsonConverter), typeof(global::G.JsonConverters.OneOfJsonConverter), typeof(global::G.JsonConverters.OneOfJsonConverter), typeof(global::G.JsonConverters.UnixTimestampJsonConverter), diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonSerializerContextTypes.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonSerializerContextTypes.g.verified.cs index ccc14d143f..7f045bbb00 100644 --- a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonSerializerContextTypes.g.verified.cs +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#JsonSerializerContextTypes.g.verified.cs @@ -19,3186 +19,4186 @@ public sealed partial class JsonSerializerContextTypes /// /// /// - public global::G.Error? Type0 { get; set; } + public global::G.AddUploadPartRequest? Type0 { get; set; } /// /// /// - public string? Type1 { get; set; } + public byte[]? Type1 { get; set; } /// /// /// - public global::G.ErrorResponse? Type2 { get; set; } + public global::G.AssistantObject? Type2 { get; set; } /// /// /// - public global::G.ListModelsResponse? Type3 { get; set; } + public string? Type3 { get; set; } /// /// /// - public global::G.ListModelsResponseObject? Type4 { get; set; } + public global::G.AssistantObjectObject? Type4 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type5 { get; set; } + public global::System.DateTimeOffset? Type5 { get; set; } /// /// /// - public global::G.Model12? Type6 { get; set; } + public global::System.Collections.Generic.IList? Type6 { get; set; } /// /// /// - public global::System.DateTimeOffset? Type7 { get; set; } + public global::G.ToolsItem? Type7 { get; set; } /// /// /// - public global::G.ModelObject? Type8 { get; set; } + public global::G.AssistantToolsCode? Type8 { get; set; } /// /// /// - public global::G.DeleteModelResponse? Type9 { get; set; } + public global::G.AssistantToolsCodeType? Type9 { get; set; } /// /// /// - public bool? Type10 { get; set; } + public global::G.AssistantToolsFileSearch? 
[Generated C# type-registry diff, abridged. The class rewritten in this part of the diff exposes one numbered, nullable property per schema model ("public global::G.<Schema>? TypeN { get; set; }", each preceded by an empty <summary> doc comment). Regenerating against the updated spec reassigns every property in this span, Type11 through Type466: the old enumeration, grouped by endpoint (completions, chat completions, fine-tuning jobs/events/checkpoints, images, moderations, files, uploads, embeddings, audio transcription/translation/speech, and the assistants surface: assistants, threads, runs, run steps, messages), is replaced by a roughly alphabetical enumeration of the updated schemas (AssistantTools*, the AssistantStreamEvent variants, ThreadObject*, RunObject*, RunStep* details and deltas, MessageObject*/MessageDelta*, AuditLog*, AutoChunkingStrategyRequestParam*, Batch*, the ChatCompletion* request/response parts, ChunkingStrategyRequestParam*, CompletionUsage*, CostsResult*, CreateAssistantRequest*, CreateChatCompletion*, ...). As a result, nearly every TypeN index now refers to a different schema type, and several properties switch between model types and primitives, collections, or wrapper types (int?, double?, bool?, byte[]?, IList<>, Dictionary<>, AnyOf<>/OneOf<>).]
Type466 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type467 { get; set; } + public global::G.CreateChatCompletionResponseChoiceFinishReason? Type467 { get; set; } /// /// /// - public global::G.AnnotationsItem2? Type468 { get; set; } + public global::G.CreateChatCompletionResponseChoiceLogprobs? Type468 { get; set; } /// /// /// - public global::G.MessageDeltaContentTextAnnotationsFileCitationObject? Type469 { get; set; } + public global::System.Collections.Generic.IList? Type469 { get; set; } /// /// /// - public global::G.MessageDeltaContentTextAnnotationsFileCitationObjectType? Type470 { get; set; } + public global::G.CreateChatCompletionResponseServiceTier? Type470 { get; set; } /// /// /// - public global::G.MessageDeltaContentTextAnnotationsFileCitationObjectFileCitation? Type471 { get; set; } + public global::G.CreateChatCompletionResponseObject? Type471 { get; set; } /// /// /// - public global::G.MessageDeltaContentTextAnnotationsFilePathObject? Type472 { get; set; } + public global::G.CreateChatCompletionStreamResponse? Type472 { get; set; } /// /// /// - public global::G.MessageDeltaContentTextAnnotationsFilePathObjectType? Type473 { get; set; } + public global::System.Collections.Generic.IList? Type473 { get; set; } /// /// /// - public global::G.MessageDeltaContentTextAnnotationsFilePathObjectFilePath? Type474 { get; set; } + public global::G.CreateChatCompletionStreamResponseChoice? Type474 { get; set; } /// /// /// - public global::G.MessageDeltaContentTextObjectTextAnnotationDiscriminator? Type475 { get; set; } + public global::G.CreateChatCompletionStreamResponseChoiceLogprobs? Type475 { get; set; } /// /// /// - public global::G.MessageDeltaContentTextObjectTextAnnotationDiscriminatorType? Type476 { get; set; } + public global::G.CreateChatCompletionStreamResponseChoiceFinishReason? Type476 { get; set; } /// /// /// - public global::G.MessageDeltaContentRefusalObject? Type477 { get; set; } + public global::G.CreateChatCompletionStreamResponseServiceTier? Type477 { get; set; } /// /// /// - public global::G.MessageDeltaContentRefusalObjectType? Type478 { get; set; } + public global::G.CreateChatCompletionStreamResponseObject? Type478 { get; set; } /// /// /// - public global::G.MessageDeltaContentImageUrlObject? Type479 { get; set; } + public global::G.CreateChatCompletionStreamResponseUsage? Type479 { get; set; } /// /// /// - public global::G.MessageDeltaContentImageUrlObjectType? Type480 { get; set; } + public global::G.CreateCompletionRequest? Type480 { get; set; } /// /// /// - public global::G.MessageDeltaContentImageUrlObjectImageUrl? Type481 { get; set; } + public global::G.AnyOf? Type481 { get; set; } /// /// /// - public global::G.MessageDeltaContentImageUrlObjectImageUrlDetail? Type482 { get; set; } + public global::G.CreateCompletionRequestModel? Type482 { get; set; } /// /// /// - public global::G.MessageDeltaObjectDeltaContentItemDiscriminator? Type483 { get; set; } + public global::G.OneOf, global::System.Collections.Generic.IList, global::System.Collections.Generic.IList>>? Type483 { get; set; } /// /// /// - public global::G.MessageDeltaObjectDeltaContentItemDiscriminatorType? Type484 { get; set; } + public global::System.Collections.Generic.IList>? Type484 { get; set; } /// /// /// - public global::G.ModifyMessageRequest? Type485 { get; set; } + public global::G.CreateCompletionResponse? Type485 { get; set; } /// /// /// - public global::G.DeleteMessageResponse? 
Type486 { get; set; } + public global::System.Collections.Generic.IList? Type486 { get; set; } /// /// /// - public global::G.DeleteMessageResponseObject? Type487 { get; set; } + public global::G.CreateCompletionResponseChoice? Type487 { get; set; } /// /// /// - public global::G.ListMessagesResponse? Type488 { get; set; } + public global::G.CreateCompletionResponseChoiceFinishReason? Type488 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type489 { get; set; } + public global::G.CreateCompletionResponseChoiceLogprobs? Type489 { get; set; } /// /// /// - public global::G.RunStepObject? Type490 { get; set; } + public global::System.Collections.Generic.IList? Type490 { get; set; } /// /// /// - public global::G.RunStepObjectObject? Type491 { get; set; } + public global::System.Collections.Generic.IList>? Type491 { get; set; } /// /// /// - public global::G.RunStepObjectType? Type492 { get; set; } + public global::System.Collections.Generic.Dictionary? Type492 { get; set; } /// /// /// - public global::G.RunStepObjectStatus? Type493 { get; set; } + public global::G.CreateCompletionResponseObject? Type493 { get; set; } /// /// /// - public global::G.RunStepObjectStepDetails? Type494 { get; set; } + public global::G.CreateEmbeddingRequest? Type494 { get; set; } /// /// /// - public global::G.RunStepDetailsMessageCreationObject? Type495 { get; set; } + public global::G.AnyOf? Type495 { get; set; } /// /// /// - public global::G.RunStepDetailsMessageCreationObjectType? Type496 { get; set; } + public global::G.CreateEmbeddingRequestModel? Type496 { get; set; } /// /// /// - public global::G.RunStepDetailsMessageCreationObjectMessageCreation? Type497 { get; set; } + public global::G.CreateEmbeddingRequestEncodingFormat? Type497 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsObject? Type498 { get; set; } + public global::G.CreateEmbeddingResponse? Type498 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsObjectType? Type499 { get; set; } + public global::System.Collections.Generic.IList? Type499 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type500 { get; set; } + public global::G.Embedding? Type500 { get; set; } /// /// /// - public global::G.ToolCallsItem? Type501 { get; set; } + public global::G.EmbeddingObject? Type501 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeObject? Type502 { get; set; } + public global::G.CreateEmbeddingResponseObject? Type502 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeObjectType? Type503 { get; set; } + public global::G.CreateEmbeddingResponseUsage? Type503 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeObjectCodeInterpreter? Type504 { get; set; } + public global::G.CreateFileRequest? Type504 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type505 { get; set; } + public global::G.CreateFileRequestPurpose? Type505 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutput? Type506 { get; set; } + public global::G.CreateFineTuningJobRequest? Type506 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeOutputLogsObject? Type507 { get; set; } + public global::G.AnyOf? Type507 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeOutputLogsObjectType? Type508 { get; set; } + public global::G.CreateFineTuningJobRequestModel? 
Type508 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeOutputImageObject? Type509 { get; set; } + public global::G.CreateFineTuningJobRequestHyperparameters? Type509 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeOutputImageObjectType? Type510 { get; set; } + public global::G.OneOf? Type510 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeOutputImageObjectImage? Type511 { get; set; } + public global::G.CreateFineTuningJobRequestHyperparametersBatchSize? Type511 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator? Type512 { get; set; } + public global::G.OneOf? Type512 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType? Type513 { get; set; } + public global::G.CreateFineTuningJobRequestHyperparametersLearningRateMultiplier? Type513 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsFileSearchObject? Type514 { get; set; } + public global::G.OneOf? Type514 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsFileSearchObjectType? Type515 { get; set; } + public global::G.CreateFineTuningJobRequestHyperparametersNEpochs? Type515 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsFunctionObject? Type516 { get; set; } + public global::System.Collections.Generic.IList? Type516 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsFunctionObjectType? Type517 { get; set; } + public global::G.CreateFineTuningJobRequestIntegration? Type517 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsFunctionObjectFunction? Type518 { get; set; } + public global::G.CreateFineTuningJobRequestIntegrationType? Type518 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsObjectToolCallDiscriminator? Type519 { get; set; } + public global::G.CreateFineTuningJobRequestIntegrationWandb? Type519 { get; set; } /// /// /// - public global::G.RunStepDetailsToolCallsObjectToolCallDiscriminatorType? Type520 { get; set; } + public global::G.CreateImageEditRequest? Type520 { get; set; } /// /// /// - public global::G.RunStepObjectStepDetailsDiscriminator? Type521 { get; set; } + public global::G.AnyOf? Type521 { get; set; } /// /// /// - public global::G.RunStepObjectStepDetailsDiscriminatorType? Type522 { get; set; } + public global::G.CreateImageEditRequestModel? Type522 { get; set; } /// /// /// - public global::G.RunStepObjectLastError? Type523 { get; set; } + public global::G.CreateImageEditRequestSize? Type523 { get; set; } /// /// /// - public global::G.RunStepObjectLastErrorCode? Type524 { get; set; } + public global::G.CreateImageEditRequestResponseFormat? Type524 { get; set; } /// /// /// - public global::G.RunStepDeltaObject? Type525 { get; set; } + public global::G.CreateImageRequest? Type525 { get; set; } /// /// /// - public global::G.RunStepDeltaObjectObject? Type526 { get; set; } + public global::G.AnyOf? Type526 { get; set; } /// /// /// - public global::G.RunStepDeltaObjectDelta? Type527 { get; set; } + public global::G.CreateImageRequestModel? Type527 { get; set; } /// /// /// - public global::G.RunStepDeltaObjectDeltaStepDetails? Type528 { get; set; } + public global::G.CreateImageRequestQuality? Type528 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsMessageCreationObject? Type529 { get; set; } + public global::G.CreateImageRequestResponseFormat? 
Type529 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsMessageCreationObjectType? Type530 { get; set; } + public global::G.CreateImageRequestSize? Type530 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsMessageCreationObjectMessageCreation? Type531 { get; set; } + public global::G.CreateImageRequestStyle? Type531 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsObject? Type532 { get; set; } + public global::G.CreateImageVariationRequest? Type532 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsObjectType? Type533 { get; set; } + public global::G.AnyOf? Type533 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type534 { get; set; } + public global::G.CreateImageVariationRequestModel? Type534 { get; set; } /// /// /// - public global::G.ToolCallsItem2? Type535 { get; set; } + public global::G.CreateImageVariationRequestResponseFormat? Type535 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeObject? Type536 { get; set; } + public global::G.CreateImageVariationRequestSize? Type536 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeObjectType? Type537 { get; set; } + public global::G.CreateMessageRequest? Type537 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter? Type538 { get; set; } + public global::G.CreateMessageRequestRole? Type538 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type539 { get; set; } + public global::G.OneOf>? Type539 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutput? Type540 { get; set; } + public global::System.Collections.Generic.IList? Type540 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject? Type541 { get; set; } + public global::G.ContentVariant2Item? Type541 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectType? Type542 { get; set; } + public global::G.MessageRequestContentTextObject? Type542 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeOutputImageObject? Type543 { get; set; } + public global::G.MessageRequestContentTextObjectType? Type543 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectType? Type544 { get; set; } + public global::G.CreateMessageRequestContentVariant2ItemDiscriminator? Type544 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImage? Type545 { get; set; } + public global::G.CreateMessageRequestContentVariant2ItemDiscriminatorType? Type545 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator? Type546 { get; set; } + public global::System.Collections.Generic.IList? Type546 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType? Type547 { get; set; } + public global::G.CreateMessageRequestAttachment? Type547 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObject? Type548 { get; set; } + public global::System.Collections.Generic.IList? Type548 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsFileSearchObjectType? Type549 { get; set; } + public global::G.ToolsItem3? 
Type549 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsFunctionObject? Type550 { get; set; } + public global::G.CreateMessageRequestAttachmentToolDiscriminator? Type550 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsFunctionObjectType? Type551 { get; set; } + public global::G.CreateMessageRequestAttachmentToolDiscriminatorType? Type551 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsFunctionObjectFunction? Type552 { get; set; } + public global::G.CreateModerationRequest? Type552 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator? Type553 { get; set; } + public global::G.OneOf, global::System.Collections.Generic.IList>? Type553 { get; set; } /// /// /// - public global::G.RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType? Type554 { get; set; } + public global::System.Collections.Generic.IList? Type554 { get; set; } /// /// /// - public global::G.RunStepDeltaObjectDeltaStepDetailsDiscriminator? Type555 { get; set; } + public global::G.InputVariant3Item? Type555 { get; set; } /// /// /// - public global::G.RunStepDeltaObjectDeltaStepDetailsDiscriminatorType? Type556 { get; set; } + public global::G.CreateModerationRequestInputVariant3ItemVariant1? Type556 { get; set; } /// /// /// - public global::G.ListRunStepsResponse? Type557 { get; set; } + public global::G.CreateModerationRequestInputVariant3ItemVariant1Type? Type557 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type558 { get; set; } + public global::G.CreateModerationRequestInputVariant3ItemVariant1ImageUrl? Type558 { get; set; } /// /// /// - public global::G.VectorStoreExpirationAfter? Type559 { get; set; } + public global::G.CreateModerationRequestInputVariant3ItemVariant2? Type559 { get; set; } /// /// /// - public global::G.VectorStoreExpirationAfterAnchor? Type560 { get; set; } + public global::G.CreateModerationRequestInputVariant3ItemVariant2Type? Type560 { get; set; } /// /// /// - public global::G.VectorStoreObject? Type561 { get; set; } + public global::G.CreateModerationRequestInputVariant3ItemDiscriminator? Type561 { get; set; } /// /// /// - public global::G.VectorStoreObjectObject? Type562 { get; set; } + public global::G.CreateModerationRequestInputVariant3ItemDiscriminatorType? Type562 { get; set; } /// /// /// - public global::G.VectorStoreObjectFileCounts? Type563 { get; set; } + public global::G.AnyOf? Type563 { get; set; } /// /// /// - public global::G.VectorStoreObjectStatus? Type564 { get; set; } + public global::G.CreateModerationRequestModel? Type564 { get; set; } /// /// /// - public global::G.CreateVectorStoreRequest? Type565 { get; set; } + public global::G.CreateModerationResponse? Type565 { get; set; } /// /// /// - public global::G.CreateVectorStoreRequestChunkingStrategy? Type566 { get; set; } + public global::System.Collections.Generic.IList? Type566 { get; set; } /// /// /// - public global::G.AutoChunkingStrategyRequestParam? Type567 { get; set; } + public global::G.CreateModerationResponseResult? Type567 { get; set; } /// /// /// - public global::G.AutoChunkingStrategyRequestParamType? Type568 { get; set; } + public global::G.CreateModerationResponseResultCategories? Type568 { get; set; } /// /// /// - public global::G.StaticChunkingStrategyRequestParam? Type569 { get; set; } + public global::G.CreateModerationResponseResultCategoryScores? 
Type569 { get; set; } /// /// /// - public global::G.StaticChunkingStrategyRequestParamType? Type570 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypes? Type570 { get; set; } /// /// /// - public global::G.StaticChunkingStrategy? Type571 { get; set; } + public global::System.Collections.Generic.IList? Type571 { get; set; } /// /// /// - public global::G.CreateVectorStoreRequestChunkingStrategyDiscriminator? Type572 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateItem? Type572 { get; set; } /// /// /// - public global::G.CreateVectorStoreRequestChunkingStrategyDiscriminatorType? Type573 { get; set; } + public global::System.Collections.Generic.IList? Type573 { get; set; } /// /// /// - public global::G.UpdateVectorStoreRequest? Type574 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem? Type574 { get; set; } /// /// /// - public global::G.ListVectorStoresResponse? Type575 { get; set; } + public global::System.Collections.Generic.IList? Type575 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type576 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem? Type576 { get; set; } /// /// /// - public global::G.DeleteVectorStoreResponse? Type577 { get; set; } + public global::System.Collections.Generic.IList? Type577 { get; set; } /// /// /// - public global::G.DeleteVectorStoreResponseObject? Type578 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem? Type578 { get; set; } /// /// /// - public global::G.VectorStoreFileObject? Type579 { get; set; } + public global::System.Collections.Generic.IList? Type579 { get; set; } /// /// /// - public global::G.VectorStoreFileObjectObject? Type580 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem? Type580 { get; set; } /// /// /// - public global::G.VectorStoreFileObjectStatus? Type581 { get; set; } + public global::System.Collections.Generic.IList? Type581 { get; set; } /// /// /// - public global::G.VectorStoreFileObjectLastError? Type582 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem? Type582 { get; set; } /// /// /// - public global::G.VectorStoreFileObjectLastErrorCode? Type583 { get; set; } + public global::System.Collections.Generic.IList? Type583 { get; set; } /// /// /// - public global::G.VectorStoreFileObjectChunkingStrategy? Type584 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem? Type584 { get; set; } /// /// /// - public global::G.StaticChunkingStrategyResponseParam? Type585 { get; set; } + public global::System.Collections.Generic.IList? Type585 { get; set; } /// /// /// - public global::G.StaticChunkingStrategyResponseParamType? Type586 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem? Type586 { get; set; } /// /// /// - public global::G.OtherChunkingStrategyResponseParam? Type587 { get; set; } + public global::System.Collections.Generic.IList? Type587 { get; set; } /// /// /// - public global::G.OtherChunkingStrategyResponseParamType? Type588 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction? 
Type588 { get; set; } /// /// /// - public global::G.VectorStoreFileObjectChunkingStrategyDiscriminator? Type589 { get; set; } + public global::System.Collections.Generic.IList? Type589 { get; set; } /// /// /// - public global::G.VectorStoreFileObjectChunkingStrategyDiscriminatorType? Type590 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualItem? Type590 { get; set; } /// /// /// - public global::G.ChunkingStrategyRequestParam? Type591 { get; set; } + public global::System.Collections.Generic.IList? Type591 { get; set; } /// /// /// - public global::G.ChunkingStrategyRequestParamDiscriminator? Type592 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor? Type592 { get; set; } /// /// /// - public global::G.ChunkingStrategyRequestParamDiscriminatorType? Type593 { get; set; } + public global::System.Collections.Generic.IList? Type593 { get; set; } /// /// /// - public global::G.CreateVectorStoreFileRequest? Type594 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem? Type594 { get; set; } /// /// /// - public global::G.ListVectorStoreFilesResponse? Type595 { get; set; } + public global::System.Collections.Generic.IList? Type595 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type596 { get; set; } + public global::G.CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem? Type596 { get; set; } /// /// /// - public global::G.DeleteVectorStoreFileResponse? Type597 { get; set; } + public global::G.CreateRunRequest? Type597 { get; set; } /// /// /// - public global::G.DeleteVectorStoreFileResponseObject? Type598 { get; set; } + public global::G.AnyOf? Type598 { get; set; } /// /// /// - public global::G.VectorStoreFileBatchObject? Type599 { get; set; } + public global::G.CreateRunRequestModel? Type599 { get; set; } /// /// /// - public global::G.VectorStoreFileBatchObjectObject? Type600 { get; set; } + public global::System.Collections.Generic.IList? Type600 { get; set; } /// /// /// - public global::G.VectorStoreFileBatchObjectStatus? Type601 { get; set; } + public global::System.Collections.Generic.IList? Type601 { get; set; } /// /// /// - public global::G.VectorStoreFileBatchObjectFileCounts? Type602 { get; set; } + public global::G.ToolsItem4? Type602 { get; set; } /// /// /// - public global::G.CreateVectorStoreFileBatchRequest? Type603 { get; set; } + public global::G.CreateRunRequestToolDiscriminator? Type603 { get; set; } /// /// /// - public global::G.AssistantStreamEvent? Type604 { get; set; } + public global::G.CreateRunRequestToolDiscriminatorType? Type604 { get; set; } /// /// /// - public global::G.ErrorEvent? Type605 { get; set; } + public global::G.CreateSpeechRequest? Type605 { get; set; } /// /// /// - public global::G.ErrorEventEvent? Type606 { get; set; } + public global::G.AnyOf? Type606 { get; set; } /// /// /// - public global::G.DoneEvent? Type607 { get; set; } + public global::G.CreateSpeechRequestModel? Type607 { get; set; } /// /// /// - public global::G.DoneEventEvent? Type608 { get; set; } + public global::G.CreateSpeechRequestVoice? Type608 { get; set; } /// /// /// - public global::G.DoneEventData? Type609 { get; set; } + public global::G.CreateSpeechRequestResponseFormat? Type609 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant3? Type610 { get; set; } + public global::G.CreateThreadAndRunRequest? 
Type610 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant3Event? Type611 { get; set; } + public global::G.CreateThreadRequest? Type611 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant4? Type612 { get; set; } + public global::G.CreateThreadRequestToolResources? Type612 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant4Event? Type613 { get; set; } + public global::G.CreateThreadRequestToolResourcesCodeInterpreter? Type613 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant5? Type614 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearch? Type614 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant5Event? Type615 { get; set; } + public global::System.Collections.Generic.IList? Type615 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant6? Type616 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStore? Type616 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant6Event? Type617 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy? Type617 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant7? Type618 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1? Type618 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant7Event? Type619 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type? Type619 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant8? Type620 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2? Type620 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant8Event? Type621 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type? Type621 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant9? Type622 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static? Type622 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant9Event? Type623 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator? Type623 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant10? Type624 { get; set; } + public global::G.CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType? Type624 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant10Event? Type625 { get; set; } + public global::G.AnyOf? Type625 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant11? Type626 { get; set; } + public global::G.CreateThreadAndRunRequestModel? Type626 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant11Event? Type627 { get; set; } + public global::System.Collections.Generic.IList? Type627 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant12? Type628 { get; set; } + public global::G.ToolsItem5? Type628 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant12Event? Type629 { get; set; } + public global::G.CreateThreadAndRunRequestToolDiscriminator? Type629 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant13? 
Type630 { get; set; } + public global::G.CreateThreadAndRunRequestToolDiscriminatorType? Type630 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant13Event? Type631 { get; set; } + public global::G.CreateThreadAndRunRequestToolResources? Type631 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant14? Type632 { get; set; } + public global::G.CreateThreadAndRunRequestToolResourcesCodeInterpreter? Type632 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant14Event? Type633 { get; set; } + public global::G.CreateThreadAndRunRequestToolResourcesFileSearch? Type633 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant15? Type634 { get; set; } + public global::G.CreateTranscriptionRequest? Type634 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant15Event? Type635 { get; set; } + public global::G.AnyOf? Type635 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant16? Type636 { get; set; } + public global::G.CreateTranscriptionRequestModel? Type636 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant16Event? Type637 { get; set; } + public global::System.Collections.Generic.IList? Type637 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant17? Type638 { get; set; } + public global::G.CreateTranscriptionRequestTimestampGranularitie? Type638 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant17Event? Type639 { get; set; } + public global::G.CreateTranscriptionResponseJson? Type639 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant18? Type640 { get; set; } + public global::G.CreateTranscriptionResponseVerboseJson? Type640 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant18Event? Type641 { get; set; } + public global::System.Collections.Generic.IList? Type641 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant19? Type642 { get; set; } + public global::G.TranscriptionWord? Type642 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant19Event? Type643 { get; set; } + public float? Type643 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant20? Type644 { get; set; } + public global::System.Collections.Generic.IList? Type644 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant20Event? Type645 { get; set; } + public global::G.TranscriptionSegment? Type645 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant21? Type646 { get; set; } + public global::G.CreateTranslationRequest? Type646 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant21Event? Type647 { get; set; } + public global::G.AnyOf? Type647 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant22? Type648 { get; set; } + public global::G.CreateTranslationRequestModel? Type648 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant22Event? Type649 { get; set; } + public global::G.CreateTranslationResponseJson? Type649 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant23? Type650 { get; set; } + public global::G.CreateTranslationResponseVerboseJson? Type650 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant23Event? Type651 { get; set; } + public global::G.CreateUploadRequest? Type651 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant24? Type652 { get; set; } + public global::G.CreateUploadRequestPurpose? 
Type652 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant24Event? Type653 { get; set; } + public global::G.CreateVectorStoreFileBatchRequest? Type653 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant25? Type654 { get; set; } + public global::G.CreateVectorStoreFileRequest? Type654 { get; set; } /// /// /// - public global::G.AssistantStreamEventVariant25Event? Type655 { get; set; } + public global::G.CreateVectorStoreRequest? Type655 { get; set; } /// /// /// - public global::G.AssistantStreamEventDiscriminator? Type656 { get; set; } + public global::G.VectorStoreExpirationAfter? Type656 { get; set; } /// /// /// - public global::G.AssistantStreamEventDiscriminatorEvent? Type657 { get; set; } + public global::G.VectorStoreExpirationAfterAnchor? Type657 { get; set; } /// /// /// - public global::G.Batch? Type658 { get; set; } + public global::G.CreateVectorStoreRequestChunkingStrategy? Type658 { get; set; } /// /// /// - public global::G.BatchObject? Type659 { get; set; } + public global::G.CreateVectorStoreRequestChunkingStrategyDiscriminator? Type659 { get; set; } /// /// /// - public global::G.BatchErrors? Type660 { get; set; } + public global::G.CreateVectorStoreRequestChunkingStrategyDiscriminatorType? Type660 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type661 { get; set; } + public global::G.DefaultProjectErrorResponse? Type661 { get; set; } /// /// /// - public global::G.BatchErrorsDataItem? Type662 { get; set; } + public global::G.DeleteAssistantResponse? Type662 { get; set; } /// /// /// - public global::G.BatchStatus? Type663 { get; set; } + public global::G.DeleteAssistantResponseObject? Type663 { get; set; } /// /// /// - public global::G.BatchRequestCounts? Type664 { get; set; } + public global::G.DeleteFileResponse? Type664 { get; set; } /// /// /// - public global::G.BatchRequestInput? Type665 { get; set; } + public global::G.DeleteFileResponseObject? Type665 { get; set; } /// /// /// - public global::G.BatchRequestInputMethod? Type666 { get; set; } + public global::G.DeleteMessageResponse? Type666 { get; set; } /// /// /// - public global::G.BatchRequestOutput? Type667 { get; set; } + public global::G.DeleteMessageResponseObject? Type667 { get; set; } /// /// /// - public global::G.BatchRequestOutputResponse? Type668 { get; set; } + public global::G.DeleteModelResponse? Type668 { get; set; } /// /// /// - public global::G.BatchRequestOutputError? Type669 { get; set; } + public global::G.DeleteThreadResponse? Type669 { get; set; } /// /// /// - public global::G.ListBatchesResponse? Type670 { get; set; } + public global::G.DeleteThreadResponseObject? Type670 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type671 { get; set; } + public global::G.DeleteVectorStoreFileResponse? Type671 { get; set; } /// /// /// - public global::G.ListBatchesResponseObject? Type672 { get; set; } + public global::G.DeleteVectorStoreFileResponseObject? Type672 { get; set; } /// /// /// - public global::G.AuditLogActorServiceAccount? Type673 { get; set; } + public global::G.DeleteVectorStoreResponse? Type673 { get; set; } /// /// /// - public global::G.AuditLogActorUser? Type674 { get; set; } + public global::G.DeleteVectorStoreResponseObject? Type674 { get; set; } /// /// /// - public global::G.AuditLogActorApiKey? Type675 { get; set; } + public global::G.ErrorResponse? Type675 { get; set; } /// /// /// - public global::G.AuditLogActorApiKeyType? 
Type676 { get; set; } + public global::G.FineTuneChatCompletionRequestAssistantMessage? Type676 { get; set; } /// /// /// - public global::G.AuditLogActorSession? Type677 { get; set; } + public global::G.FineTuneChatCompletionRequestAssistantMessageVariant1? Type677 { get; set; } /// /// /// - public global::G.AuditLogActor? Type678 { get; set; } + public global::G.FineTuningIntegration? Type678 { get; set; } /// /// /// - public global::G.AuditLogActorType? Type679 { get; set; } + public global::G.FineTuningIntegrationType? Type679 { get; set; } /// /// /// - public global::G.AuditLogEventType? Type680 { get; set; } + public global::G.FineTuningIntegrationWandb? Type680 { get; set; } /// /// /// - public global::G.AuditLog? Type681 { get; set; } + public global::G.FineTuningJob? Type681 { get; set; } /// /// /// - public global::G.AuditLogProject? Type682 { get; set; } + public global::G.FineTuningJobError? Type682 { get; set; } /// /// /// - public global::G.AuditLogApiKeyCreated? Type683 { get; set; } + public global::G.FineTuningJobHyperparameters? Type683 { get; set; } /// /// /// - public global::G.AuditLogApiKeyCreatedData? Type684 { get; set; } + public global::G.OneOf? Type684 { get; set; } /// /// /// - public global::G.AuditLogApiKeyUpdated? Type685 { get; set; } + public global::G.FineTuningJobHyperparametersNEpochs? Type685 { get; set; } /// /// /// - public global::G.AuditLogApiKeyUpdatedChangesRequested? Type686 { get; set; } + public global::G.FineTuningJobObject? Type686 { get; set; } /// /// /// - public global::G.AuditLogApiKeyDeleted? Type687 { get; set; } + public global::G.FineTuningJobStatus? Type687 { get; set; } /// /// /// - public global::G.AuditLogInviteSent? Type688 { get; set; } + public global::System.Collections.Generic.IList>? Type688 { get; set; } /// /// /// - public global::G.AuditLogInviteSentData? Type689 { get; set; } + public global::G.OneOf? Type689 { get; set; } /// /// /// - public global::G.AuditLogInviteAccepted? Type690 { get; set; } + public global::G.FineTuningJobCheckpoint? Type690 { get; set; } /// /// /// - public global::G.AuditLogInviteDeleted? Type691 { get; set; } + public global::G.FineTuningJobCheckpointMetrics? Type691 { get; set; } /// /// /// - public global::G.AuditLogLoginFailed? Type692 { get; set; } + public global::G.FineTuningJobCheckpointObject? Type692 { get; set; } /// /// /// - public global::G.AuditLogLogoutFailed? Type693 { get; set; } + public global::G.FineTuningJobEvent? Type693 { get; set; } /// /// /// - public global::G.AuditLogOrganizationUpdated? Type694 { get; set; } + public global::G.FineTuningJobEventLevel? Type694 { get; set; } /// /// /// - public global::G.AuditLogOrganizationUpdatedChangesRequested? Type695 { get; set; } + public global::G.FineTuningJobEventObject? Type695 { get; set; } /// /// /// - public global::G.AuditLogOrganizationUpdatedChangesRequestedSettings? Type696 { get; set; } + public global::G.FinetuneChatRequestInput? Type696 { get; set; } /// /// /// - public global::G.AuditLogProjectCreated? Type697 { get; set; } + public global::System.Collections.Generic.IList>? Type697 { get; set; } /// /// /// - public global::G.AuditLogProjectCreatedData? Type698 { get; set; } + public global::G.OneOf? Type698 { get; set; } /// /// /// - public global::G.AuditLogProjectUpdated? Type699 { get; set; } + public global::G.FinetuneCompletionRequestInput? Type699 { get; set; } /// /// /// - public global::G.AuditLogProjectUpdatedChangesRequested? Type700 { get; set; } + public global::G.Image? 
Type700 { get; set; } /// /// /// - public global::G.AuditLogProjectArchived? Type701 { get; set; } + public global::G.ImagesResponse? Type701 { get; set; } /// /// /// - public global::G.AuditLogServiceAccountCreated? Type702 { get; set; } + public global::System.Collections.Generic.IList? Type702 { get; set; } /// /// /// - public global::G.AuditLogServiceAccountCreatedData? Type703 { get; set; } + public global::G.Invite? Type703 { get; set; } /// /// /// - public global::G.AuditLogServiceAccountUpdated? Type704 { get; set; } + public global::G.InviteObject? Type704 { get; set; } /// /// /// - public global::G.AuditLogServiceAccountUpdatedChangesRequested? Type705 { get; set; } + public global::G.InviteRole? Type705 { get; set; } /// /// /// - public global::G.AuditLogServiceAccountDeleted? Type706 { get; set; } + public global::G.InviteStatus? Type706 { get; set; } /// /// /// - public global::G.AuditLogUserAdded? Type707 { get; set; } + public global::G.InviteDeleteResponse? Type707 { get; set; } /// /// /// - public global::G.AuditLogUserAddedData? Type708 { get; set; } + public global::G.InviteDeleteResponseObject? Type708 { get; set; } /// /// /// - public global::G.AuditLogUserUpdated? Type709 { get; set; } + public global::G.InviteListResponse? Type709 { get; set; } /// /// /// - public global::G.AuditLogUserUpdatedChangesRequested? Type710 { get; set; } + public global::G.InviteListResponseObject? Type710 { get; set; } /// /// /// - public global::G.AuditLogUserDeleted? Type711 { get; set; } + public global::System.Collections.Generic.IList? Type711 { get; set; } /// /// /// - public global::G.ListAuditLogsResponse? Type712 { get; set; } + public global::G.InviteRequest? Type712 { get; set; } /// /// /// - public global::G.ListAuditLogsResponseObject? Type713 { get; set; } + public global::G.InviteRequestRole? Type713 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type714 { get; set; } + public global::G.ListAssistantsResponse? Type714 { get; set; } /// /// /// - public global::G.Invite? Type715 { get; set; } + public global::System.Collections.Generic.IList? Type715 { get; set; } /// /// /// - public global::G.InviteObject? Type716 { get; set; } + public global::G.ListAuditLogsResponse? Type716 { get; set; } /// /// /// - public global::G.InviteRole? Type717 { get; set; } + public global::G.ListAuditLogsResponseObject? Type717 { get; set; } /// /// /// - public global::G.InviteStatus? Type718 { get; set; } + public global::System.Collections.Generic.IList? Type718 { get; set; } /// /// /// - public global::G.InviteListResponse? Type719 { get; set; } + public global::G.ListBatchesResponse? Type719 { get; set; } /// /// /// - public global::G.InviteListResponseObject? Type720 { get; set; } + public global::System.Collections.Generic.IList? Type720 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type721 { get; set; } + public global::G.ListBatchesResponseObject? Type721 { get; set; } /// /// /// - public global::G.InviteRequest? Type722 { get; set; } + public global::G.ListFilesResponse? Type722 { get; set; } /// /// /// - public global::G.InviteRequestRole? Type723 { get; set; } + public global::System.Collections.Generic.IList? Type723 { get; set; } /// /// /// - public global::G.InviteDeleteResponse? Type724 { get; set; } + public global::G.OpenAIFile? Type724 { get; set; } /// /// /// - public global::G.InviteDeleteResponseObject? Type725 { get; set; } + public global::G.OpenAIFileObject? 
Type725 { get; set; } /// /// /// - public global::G.User? Type726 { get; set; } + public global::G.OpenAIFilePurpose? Type726 { get; set; } /// /// /// - public global::G.UserObject? Type727 { get; set; } + public global::G.OpenAIFileStatus? Type727 { get; set; } /// /// /// - public global::G.UserRole? Type728 { get; set; } + public global::G.ListFineTuningJobCheckpointsResponse? Type728 { get; set; } /// /// /// - public global::G.UserListResponse? Type729 { get; set; } + public global::System.Collections.Generic.IList? Type729 { get; set; } /// /// /// - public global::G.UserListResponseObject? Type730 { get; set; } + public global::G.ListFineTuningJobCheckpointsResponseObject? Type730 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type731 { get; set; } + public global::G.ListFineTuningJobEventsResponse? Type731 { get; set; } /// /// /// - public global::G.UserRoleUpdateRequest? Type732 { get; set; } + public global::System.Collections.Generic.IList? Type732 { get; set; } /// /// /// - public global::G.UserRoleUpdateRequestRole? Type733 { get; set; } + public global::G.ListFineTuningJobEventsResponseObject? Type733 { get; set; } /// /// /// - public global::G.UserDeleteResponse? Type734 { get; set; } + public global::G.ListMessagesResponse? Type734 { get; set; } /// /// /// - public global::G.UserDeleteResponseObject? Type735 { get; set; } + public global::System.Collections.Generic.IList? Type735 { get; set; } /// /// /// - public global::G.Project? Type736 { get; set; } + public global::G.ListModelsResponse? Type736 { get; set; } /// /// /// - public global::G.ProjectObject? Type737 { get; set; } + public global::G.ListModelsResponseObject? Type737 { get; set; } /// /// /// - public global::G.ProjectStatus? Type738 { get; set; } + public global::System.Collections.Generic.IList? Type738 { get; set; } /// /// /// - public global::G.ProjectListResponse? Type739 { get; set; } + public global::G.Model15? Type739 { get; set; } /// /// /// - public global::G.ProjectListResponseObject? Type740 { get; set; } + public global::G.ModelObject? Type740 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type741 { get; set; } + public global::G.ListPaginatedFineTuningJobsResponse? Type741 { get; set; } /// /// /// - public global::G.ProjectCreateRequest? Type742 { get; set; } + public global::System.Collections.Generic.IList? Type742 { get; set; } /// /// /// - public global::G.ProjectUpdateRequest? Type743 { get; set; } + public global::G.ListPaginatedFineTuningJobsResponseObject? Type743 { get; set; } /// /// /// - public global::G.DefaultProjectErrorResponse? Type744 { get; set; } + public global::G.ListRunStepsResponse? Type744 { get; set; } /// /// /// - public global::G.ProjectUser? Type745 { get; set; } + public global::System.Collections.Generic.IList? Type745 { get; set; } /// /// /// - public global::G.ProjectUserObject? Type746 { get; set; } + public global::G.ListRunsResponse? Type746 { get; set; } /// /// /// - public global::G.ProjectUserRole? Type747 { get; set; } + public global::System.Collections.Generic.IList? Type747 { get; set; } /// /// /// - public global::G.ProjectUserListResponse? Type748 { get; set; } + public global::G.ListThreadsResponse? Type748 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type749 { get; set; } + public global::System.Collections.Generic.IList? Type749 { get; set; } /// /// /// - public global::G.ProjectUserCreateRequest? 
Type750 { get; set; } + public global::G.ListVectorStoreFilesResponse? Type750 { get; set; } /// /// /// - public global::G.ProjectUserCreateRequestRole? Type751 { get; set; } + public global::System.Collections.Generic.IList? Type751 { get; set; } /// /// /// - public global::G.ProjectUserUpdateRequest? Type752 { get; set; } + public global::G.VectorStoreFileObject? Type752 { get; set; } /// /// /// - public global::G.ProjectUserUpdateRequestRole? Type753 { get; set; } + public global::G.VectorStoreFileObjectObject? Type753 { get; set; } /// /// /// - public global::G.ProjectUserDeleteResponse? Type754 { get; set; } + public global::G.VectorStoreFileObjectStatus? Type754 { get; set; } /// /// /// - public global::G.ProjectUserDeleteResponseObject? Type755 { get; set; } + public global::G.VectorStoreFileObjectLastError? Type755 { get; set; } /// /// /// - public global::G.ProjectServiceAccount? Type756 { get; set; } + public global::G.VectorStoreFileObjectLastErrorCode? Type756 { get; set; } /// /// /// - public global::G.ProjectServiceAccountObject? Type757 { get; set; } + public global::G.VectorStoreFileObjectChunkingStrategy? Type757 { get; set; } /// /// /// - public global::G.ProjectServiceAccountRole? Type758 { get; set; } + public global::G.StaticChunkingStrategyResponseParam? Type758 { get; set; } /// /// /// - public global::G.ProjectServiceAccountListResponse? Type759 { get; set; } + public global::G.StaticChunkingStrategyResponseParamType? Type759 { get; set; } /// /// /// - public global::G.ProjectServiceAccountListResponseObject? Type760 { get; set; } + public global::G.OtherChunkingStrategyResponseParam? Type760 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type761 { get; set; } + public global::G.OtherChunkingStrategyResponseParamType? Type761 { get; set; } /// /// /// - public global::G.ProjectServiceAccountCreateRequest? Type762 { get; set; } + public global::G.VectorStoreFileObjectChunkingStrategyDiscriminator? Type762 { get; set; } /// /// /// - public global::G.ProjectServiceAccountCreateResponse? Type763 { get; set; } + public global::G.VectorStoreFileObjectChunkingStrategyDiscriminatorType? Type763 { get; set; } /// /// /// - public global::G.ProjectServiceAccountCreateResponseObject? Type764 { get; set; } + public global::G.ListVectorStoresResponse? Type764 { get; set; } /// /// /// - public global::G.ProjectServiceAccountCreateResponseRole? Type765 { get; set; } + public global::System.Collections.Generic.IList? Type765 { get; set; } /// /// /// - public global::G.ProjectServiceAccountApiKey? Type766 { get; set; } + public global::G.VectorStoreObject? Type766 { get; set; } /// /// /// - public global::G.ProjectServiceAccountApiKeyObject? Type767 { get; set; } + public global::G.VectorStoreObjectObject? Type767 { get; set; } /// /// /// - public global::G.ProjectServiceAccountDeleteResponse? Type768 { get; set; } + public global::G.VectorStoreObjectFileCounts? Type768 { get; set; } /// /// /// - public global::G.ProjectServiceAccountDeleteResponseObject? Type769 { get; set; } + public global::G.VectorStoreObjectStatus? Type769 { get; set; } /// /// /// - public global::G.ProjectApiKey? Type770 { get; set; } + public global::G.ModifyAssistantRequest? Type770 { get; set; } /// /// /// - public global::G.ProjectApiKeyObject? Type771 { get; set; } + public global::System.Collections.Generic.IList? Type771 { get; set; } /// /// /// - public global::G.ProjectApiKeyOwner? Type772 { get; set; } + public global::G.ToolsItem7? 
Type772 { get; set; } /// /// /// - public global::G.ProjectApiKeyOwnerType? Type773 { get; set; } + public global::G.ModifyAssistantRequestToolDiscriminator? Type773 { get; set; } /// /// /// - public global::G.ProjectApiKeyListResponse? Type774 { get; set; } + public global::G.ModifyAssistantRequestToolDiscriminatorType? Type774 { get; set; } /// /// /// - public global::G.ProjectApiKeyListResponseObject? Type775 { get; set; } + public global::G.ModifyAssistantRequestToolResources? Type775 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type776 { get; set; } + public global::G.ModifyAssistantRequestToolResourcesCodeInterpreter? Type776 { get; set; } /// /// /// - public global::G.ProjectApiKeyDeleteResponse? Type777 { get; set; } + public global::G.ModifyAssistantRequestToolResourcesFileSearch? Type777 { get; set; } /// /// /// - public global::G.ProjectApiKeyDeleteResponseObject? Type778 { get; set; } + public global::G.ModifyMessageRequest? Type778 { get; set; } /// /// /// - public global::G.CreateBatchRequest? Type779 { get; set; } + public global::G.ModifyRunRequest? Type779 { get; set; } /// /// /// - public global::G.CreateBatchRequestEndpoint? Type780 { get; set; } + public global::G.ModifyThreadRequest? Type780 { get; set; } /// /// /// - public global::G.CreateBatchRequestCompletionWindow? Type781 { get; set; } + public global::G.ModifyThreadRequestToolResources? Type781 { get; set; } /// /// /// - public global::System.Collections.Generic.Dictionary? Type782 { get; set; } + public global::G.ModifyThreadRequestToolResourcesCodeInterpreter? Type782 { get; set; } /// /// /// - public global::G.ListAssistantsOrder? Type783 { get; set; } + public global::G.ModifyThreadRequestToolResourcesFileSearch? Type783 { get; set; } /// /// /// - public global::G.ListMessagesOrder? Type784 { get; set; } + public global::G.Project? Type784 { get; set; } /// /// /// - public global::G.ListRunsOrder? Type785 { get; set; } + public global::G.ProjectObject? Type785 { get; set; } /// /// /// - public global::G.ListRunStepsOrder? Type786 { get; set; } + public global::G.ProjectStatus? Type786 { get; set; } /// /// /// - public global::G.ListVectorStoresOrder? Type787 { get; set; } + public global::G.ProjectApiKey? Type787 { get; set; } /// /// /// - public global::G.ListVectorStoreFilesOrder? Type788 { get; set; } + public global::G.ProjectApiKeyObject? Type788 { get; set; } /// /// /// - public global::G.ListVectorStoreFilesFilter? Type789 { get; set; } + public global::G.ProjectApiKeyOwner? Type789 { get; set; } /// /// /// - public global::G.ListFilesInVectorStoreBatchOrder? Type790 { get; set; } + public global::G.ProjectApiKeyOwnerType? Type790 { get; set; } /// /// /// - public global::G.ListFilesInVectorStoreBatchFilter? Type791 { get; set; } + public global::G.ProjectUser? Type791 { get; set; } /// /// /// - public global::G.ListAuditLogsEffectiveAt? Type792 { get; set; } + public global::G.ProjectUserObject? Type792 { get; set; } /// /// /// - public global::System.Collections.Generic.IList? Type793 { get; set; } + public global::G.ProjectUserRole? Type793 { get; set; } /// /// /// - public global::G.OneOf? Type794 { get; set; } + public global::G.ProjectServiceAccount? Type794 { get; set; } /// /// /// - public global::G.OneOf? Type795 { get; set; } + public global::G.ProjectServiceAccountObject? Type795 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountRole? 
Type796 { get; set; } + /// + /// + /// + public global::G.ProjectApiKeyDeleteResponse? Type797 { get; set; } + /// + /// + /// + public global::G.ProjectApiKeyDeleteResponseObject? Type798 { get; set; } + /// + /// + /// + public global::G.ProjectApiKeyListResponse? Type799 { get; set; } + /// + /// + /// + public global::G.ProjectApiKeyListResponseObject? Type800 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type801 { get; set; } + /// + /// + /// + public global::G.ProjectCreateRequest? Type802 { get; set; } + /// + /// + /// + public global::G.ProjectListResponse? Type803 { get; set; } + /// + /// + /// + public global::G.ProjectListResponseObject? Type804 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type805 { get; set; } + /// + /// + /// + public global::G.ProjectRateLimit? Type806 { get; set; } + /// + /// + /// + public global::G.ProjectRateLimitObject? Type807 { get; set; } + /// + /// + /// + public global::G.ProjectRateLimitListResponse? Type808 { get; set; } + /// + /// + /// + public global::G.ProjectRateLimitListResponseObject? Type809 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type810 { get; set; } + /// + /// + /// + public global::G.ProjectRateLimitUpdateRequest? Type811 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountApiKey? Type812 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountApiKeyObject? Type813 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountCreateRequest? Type814 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountCreateResponse? Type815 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountCreateResponseObject? Type816 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountCreateResponseRole? Type817 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountDeleteResponse? Type818 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountDeleteResponseObject? Type819 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountListResponse? Type820 { get; set; } + /// + /// + /// + public global::G.ProjectServiceAccountListResponseObject? Type821 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type822 { get; set; } + /// + /// + /// + public global::G.ProjectUpdateRequest? Type823 { get; set; } + /// + /// + /// + public global::G.ProjectUserCreateRequest? Type824 { get; set; } + /// + /// + /// + public global::G.ProjectUserCreateRequestRole? Type825 { get; set; } + /// + /// + /// + public global::G.ProjectUserDeleteResponse? Type826 { get; set; } + /// + /// + /// + public global::G.ProjectUserDeleteResponseObject? Type827 { get; set; } + /// + /// + /// + public global::G.ProjectUserListResponse? Type828 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type829 { get; set; } + /// + /// + /// + public global::G.ProjectUserUpdateRequest? Type830 { get; set; } + /// + /// + /// + public global::G.ProjectUserUpdateRequestRole? Type831 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventConversationItemCreate? Type832 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventConversationItemCreateType? Type833 { get; set; } + /// + /// + /// + public global::G.RealtimeConversationItem? Type834 { get; set; } + /// + /// + /// + public global::G.RealtimeConversationItemType? 
Type835 { get; set; } + /// + /// + /// + public global::G.RealtimeConversationItemObject? Type836 { get; set; } + /// + /// + /// + public global::G.RealtimeConversationItemStatus? Type837 { get; set; } + /// + /// + /// + public global::G.RealtimeConversationItemRole? Type838 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type839 { get; set; } + /// + /// + /// + public global::G.RealtimeConversationItemContentItem? Type840 { get; set; } + /// + /// + /// + public global::G.RealtimeConversationItemContentItemType? Type841 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventConversationItemDelete? Type842 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventConversationItemDeleteType? Type843 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventConversationItemTruncate? Type844 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventConversationItemTruncateType? Type845 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventInputAudioBufferAppend? Type846 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventInputAudioBufferAppendType? Type847 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventInputAudioBufferClear? Type848 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventInputAudioBufferClearType? Type849 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventInputAudioBufferCommit? Type850 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventInputAudioBufferCommitType? Type851 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventResponseCancel? Type852 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventResponseCancelType? Type853 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventResponseCreate? Type854 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventResponseCreateType? Type855 { get; set; } + /// + /// + /// + public global::G.RealtimeSession? Type856 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type857 { get; set; } + /// + /// + /// + public global::G.RealtimeSessionModalitie? Type858 { get; set; } + /// + /// + /// + public global::G.RealtimeSessionVoice? Type859 { get; set; } + /// + /// + /// + public global::G.RealtimeSessionInputAudioTranscription? Type860 { get; set; } + /// + /// + /// + public global::G.RealtimeSessionTurnDetection? Type861 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type862 { get; set; } + /// + /// + /// + public global::G.RealtimeSessionTool? Type863 { get; set; } + /// + /// + /// + public global::G.RealtimeSessionToolType? Type864 { get; set; } + /// + /// + /// + public global::G.OneOf? Type865 { get; set; } + /// + /// + /// + public global::G.RealtimeSessionMaxResponseOutputTokens? Type866 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventSessionUpdate? Type867 { get; set; } + /// + /// + /// + public global::G.RealtimeClientEventSessionUpdateType? Type868 { get; set; } + /// + /// + /// + public global::G.RealtimeResponse? Type869 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseObject? Type870 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseStatus? Type871 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseStatusDetails? Type872 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseStatusDetailsType? 
Type873 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseStatusDetailsReason? Type874 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseStatusDetailsError? Type875 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type876 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseUsage? Type877 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseUsageInputTokenDetails? Type878 { get; set; } + /// + /// + /// + public global::G.RealtimeResponseUsageOutputTokenDetails? Type879 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationCreated? Type880 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationCreatedType? Type881 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationCreatedConversation? Type882 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemCreated? Type883 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemCreatedType? Type884 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemDeleted? Type885 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemDeletedType? Type886 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted? Type887 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType? Type888 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailed? Type889 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedType? Type890 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError? Type891 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemTruncated? Type892 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventConversationItemTruncatedType? Type893 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventError? Type894 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventErrorType? Type895 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventErrorError? Type896 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventInputAudioBufferCleared? Type897 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventInputAudioBufferClearedType? Type898 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventInputAudioBufferCommitted? Type899 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventInputAudioBufferCommittedType? Type900 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventInputAudioBufferSpeechStarted? Type901 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventInputAudioBufferSpeechStartedType? Type902 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventInputAudioBufferSpeechStopped? Type903 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventInputAudioBufferSpeechStoppedType? Type904 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventRateLimitsUpdated? Type905 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventRateLimitsUpdatedType? 
Type906 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type907 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventRateLimitsUpdatedRateLimit? Type908 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseAudioDelta? Type909 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseAudioDeltaType? Type910 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseAudioDone? Type911 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseAudioDoneType? Type912 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseAudioTranscriptDelta? Type913 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseAudioTranscriptDeltaType? Type914 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseAudioTranscriptDone? Type915 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseAudioTranscriptDoneType? Type916 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseContentPartAdded? Type917 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseContentPartAddedType? Type918 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseContentPartAddedPart? Type919 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseContentPartAddedPartType? Type920 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseContentPartDone? Type921 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseContentPartDoneType? Type922 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseContentPartDonePart? Type923 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseCreated? Type924 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseCreatedType? Type925 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseDone? Type926 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseDoneType? Type927 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseFunctionCallArgumentsDelta? Type928 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseFunctionCallArgumentsDeltaType? Type929 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseFunctionCallArgumentsDone? Type930 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseFunctionCallArgumentsDoneType? Type931 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseOutputItemAdded? Type932 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseOutputItemAddedType? Type933 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseOutputItemDone? Type934 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseOutputItemDoneType? Type935 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseTextDelta? Type936 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseTextDeltaType? Type937 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseTextDone? Type938 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventResponseTextDoneType? Type939 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventSessionCreated? 
Type940 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventSessionCreatedType? Type941 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventSessionUpdated? Type942 { get; set; } + /// + /// + /// + public global::G.RealtimeServerEventSessionUpdatedType? Type943 { get; set; } + /// + /// + /// + public global::G.SubmitToolOutputsRunRequest? Type944 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type945 { get; set; } + /// + /// + /// + public global::G.SubmitToolOutputsRunRequestToolOutput? Type946 { get; set; } + /// + /// + /// + public global::G.UpdateVectorStoreRequest? Type947 { get; set; } + /// + /// + /// + public global::G.Upload? Type948 { get; set; } + /// + /// + /// + public global::G.UploadStatus? Type949 { get; set; } + /// + /// + /// + public global::G.UploadObject? Type950 { get; set; } + /// + /// + /// + public global::G.UploadPart? Type951 { get; set; } + /// + /// + /// + public global::G.UploadPartObject? Type952 { get; set; } + /// + /// + /// + public global::G.UsageAudioSpeechesResult? Type953 { get; set; } + /// + /// + /// + public global::G.UsageAudioSpeechesResultObject? Type954 { get; set; } + /// + /// + /// + public global::G.UsageAudioTranscriptionsResult? Type955 { get; set; } + /// + /// + /// + public global::G.UsageAudioTranscriptionsResultObject? Type956 { get; set; } + /// + /// + /// + public global::G.UsageCodeInterpreterSessionsResult? Type957 { get; set; } + /// + /// + /// + public global::G.UsageCodeInterpreterSessionsResultObject? Type958 { get; set; } + /// + /// + /// + public global::G.UsageCompletionsResult? Type959 { get; set; } + /// + /// + /// + public global::G.UsageCompletionsResultObject? Type960 { get; set; } + /// + /// + /// + public global::G.UsageEmbeddingsResult? Type961 { get; set; } + /// + /// + /// + public global::G.UsageEmbeddingsResultObject? Type962 { get; set; } + /// + /// + /// + public global::G.UsageImagesResult? Type963 { get; set; } + /// + /// + /// + public global::G.UsageImagesResultObject? Type964 { get; set; } + /// + /// + /// + public global::G.UsageModerationsResult? Type965 { get; set; } + /// + /// + /// + public global::G.UsageModerationsResultObject? Type966 { get; set; } + /// + /// + /// + public global::G.UsageResponse? Type967 { get; set; } + /// + /// + /// + public global::G.UsageResponseObject? Type968 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type969 { get; set; } + /// + /// + /// + public global::G.UsageTimeBucket? Type970 { get; set; } + /// + /// + /// + public global::G.UsageTimeBucketObject? Type971 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type972 { get; set; } + /// + /// + /// + public global::G.ResultItem? Type973 { get; set; } + /// + /// + /// + public global::G.UsageVectorStoresResult? Type974 { get; set; } + /// + /// + /// + public global::G.UsageVectorStoresResultObject? Type975 { get; set; } + /// + /// + /// + public global::G.UsageTimeBucketResultItemDiscriminator? Type976 { get; set; } + /// + /// + /// + public global::G.UsageTimeBucketResultItemDiscriminatorObject? Type977 { get; set; } + /// + /// + /// + public global::G.User? Type978 { get; set; } + /// + /// + /// + public global::G.UserObject? Type979 { get; set; } + /// + /// + /// + public global::G.UserRole? Type980 { get; set; } + /// + /// + /// + public global::G.UserDeleteResponse? 
Type981 { get; set; } + /// + /// + /// + public global::G.UserDeleteResponseObject? Type982 { get; set; } + /// + /// + /// + public global::G.UserListResponse? Type983 { get; set; } + /// + /// + /// + public global::G.UserListResponseObject? Type984 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type985 { get; set; } + /// + /// + /// + public global::G.UserRoleUpdateRequest? Type986 { get; set; } + /// + /// + /// + public global::G.UserRoleUpdateRequestRole? Type987 { get; set; } + /// + /// + /// + public global::G.VectorStoreFileBatchObject? Type988 { get; set; } + /// + /// + /// + public global::G.VectorStoreFileBatchObjectObject? Type989 { get; set; } + /// + /// + /// + public global::G.VectorStoreFileBatchObjectStatus? Type990 { get; set; } + /// + /// + /// + public global::G.VectorStoreFileBatchObjectFileCounts? Type991 { get; set; } + /// + /// + /// + public global::G.CreateBatchRequest? Type992 { get; set; } + /// + /// + /// + public global::G.CreateBatchRequestEndpoint? Type993 { get; set; } + /// + /// + /// + public global::G.CreateBatchRequestCompletionWindow? Type994 { get; set; } + /// + /// + /// + public global::G.ListAssistantsOrder? Type995 { get; set; } + /// + /// + /// + public global::G.ListFilesOrder? Type996 { get; set; } + /// + /// + /// + public global::G.ListAuditLogsEffectiveAt? Type997 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type998 { get; set; } + /// + /// + /// + public global::G.UsageCostsBucketWidth? Type999 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1000 { get; set; } + /// + /// + /// + public global::G.UsageCostsGroupByItem? Type1001 { get; set; } + /// + /// + /// + public global::G.UsageAudioSpeechesBucketWidth? Type1002 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1003 { get; set; } + /// + /// + /// + public global::G.UsageAudioSpeechesGroupByItem? Type1004 { get; set; } + /// + /// + /// + public global::G.UsageAudioTranscriptionsBucketWidth? Type1005 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1006 { get; set; } + /// + /// + /// + public global::G.UsageAudioTranscriptionsGroupByItem? Type1007 { get; set; } + /// + /// + /// + public global::G.UsageCodeInterpreterSessionsBucketWidth? Type1008 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1009 { get; set; } + /// + /// + /// + public global::G.UsageCodeInterpreterSessionsGroupByItem? Type1010 { get; set; } + /// + /// + /// + public global::G.UsageCompletionsBucketWidth? Type1011 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1012 { get; set; } + /// + /// + /// + public global::G.UsageCompletionsGroupByItem? Type1013 { get; set; } + /// + /// + /// + public global::G.UsageEmbeddingsBucketWidth? Type1014 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1015 { get; set; } + /// + /// + /// + public global::G.UsageEmbeddingsGroupByItem? Type1016 { get; set; } + /// + /// + /// + public global::G.UsageImagesBucketWidth? Type1017 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1018 { get; set; } + /// + /// + /// + public global::G.UsageImagesSource? Type1019 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1020 { get; set; } + /// + /// + /// + public global::G.UsageImagesSize? 
Type1021 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1022 { get; set; } + /// + /// + /// + public global::G.UsageImagesGroupByItem? Type1023 { get; set; } + /// + /// + /// + public global::G.UsageModerationsBucketWidth? Type1024 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1025 { get; set; } + /// + /// + /// + public global::G.UsageModerationsGroupByItem? Type1026 { get; set; } + /// + /// + /// + public global::G.UsageVectorStoresBucketWidth? Type1027 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1028 { get; set; } + /// + /// + /// + public global::G.UsageVectorStoresGroupByItem? Type1029 { get; set; } + /// + /// + /// + public global::G.ListMessagesOrder? Type1030 { get; set; } + /// + /// + /// + public global::G.ListRunsOrder? Type1031 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1032 { get; set; } + /// + /// + /// + public global::G.CreateRunIncludeItem? Type1033 { get; set; } + /// + /// + /// + public global::G.ListRunStepsOrder? Type1034 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1035 { get; set; } + /// + /// + /// + public global::G.ListRunStepsIncludeItem? Type1036 { get; set; } + /// + /// + /// + public global::System.Collections.Generic.IList? Type1037 { get; set; } + /// + /// + /// + public global::G.GetRunStepIncludeItem? Type1038 { get; set; } + /// + /// + /// + public global::G.ListVectorStoresOrder? Type1039 { get; set; } + /// + /// + /// + public global::G.ListFilesInVectorStoreBatchOrder? Type1040 { get; set; } + /// + /// + /// + public global::G.ListFilesInVectorStoreBatchFilter? Type1041 { get; set; } + /// + /// + /// + public global::G.ListVectorStoreFilesOrder? Type1042 { get; set; } + /// + /// + /// + public global::G.ListVectorStoreFilesFilter? Type1043 { get; set; } + /// + /// + /// + public global::G.OneOf? Type1044 { get; set; } + /// + /// + /// + public global::G.OneOf? Type1045 { get; set; } } } \ No newline at end of file diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#OneOf.3.Json.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#OneOf.3.Json.g.verified.cs new file mode 100644 index 0000000000..d050e4d635 --- /dev/null +++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#OneOf.3.Json.g.verified.cs @@ -0,0 +1,93 @@ +//HintName: OneOf.3.Json.g.cs +#nullable enable + +namespace G +{ + public readonly partial struct OneOf + { + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerContext. + /// + public string ToJson( + global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext) + { + return global::System.Text.Json.JsonSerializer.Serialize( + this, + this.GetType(), + jsonSerializerContext); + } + + /// + /// Serializes the current instance to a JSON string using the provided JsonSerializerOptions. + /// +#if NET8_0_OR_GREATER + [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. 
Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
+        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
+#endif
+        public string ToJson(
+            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
+        {
+            return global::System.Text.Json.JsonSerializer.Serialize(
+                this,
+                jsonSerializerOptions);
+        }
+
+        /// <summary>
+        /// Deserializes a JSON string using the provided JsonSerializerContext.
+        /// </summary>
+        public static global::G.OneOf<T1, T2, T3>? FromJson(
+            string json,
+            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
+        {
+            return global::System.Text.Json.JsonSerializer.Deserialize(
+                json,
+                typeof(global::G.OneOf<T1, T2, T3>),
+                jsonSerializerContext) as global::G.OneOf<T1, T2, T3>?;
+        }
+
+        /// <summary>
+        /// Deserializes a JSON string using the provided JsonSerializerOptions.
+        /// </summary>
+#if NET8_0_OR_GREATER
+        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
+        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
+#endif
+        public static global::G.OneOf<T1, T2, T3>? FromJson(
+            string json,
+            global::System.Text.Json.JsonSerializerOptions? jsonSerializerOptions = null)
+        {
+            return global::System.Text.Json.JsonSerializer.Deserialize<global::G.OneOf<T1, T2, T3>>(
+                json,
+                jsonSerializerOptions);
+        }
+
+        /// <summary>
+        /// Deserializes a JSON stream using the provided JsonSerializerContext.
+        /// </summary>
+        public static async global::System.Threading.Tasks.ValueTask<global::G.OneOf<T1, T2, T3>?> FromJsonStreamAsync(
+            global::System.IO.Stream jsonStream,
+            global::System.Text.Json.Serialization.JsonSerializerContext jsonSerializerContext)
+        {
+            return (await global::System.Text.Json.JsonSerializer.DeserializeAsync(
+                jsonStream,
+                typeof(global::G.OneOf<T1, T2, T3>),
+                jsonSerializerContext).ConfigureAwait(false)) as global::G.OneOf<T1, T2, T3>?;
+        }
+
+        /// <summary>
+        /// Deserializes a JSON stream using the provided JsonSerializerOptions.
+        /// </summary>
+#if NET8_0_OR_GREATER
+        [global::System.Diagnostics.CodeAnalysis.RequiresUnreferencedCode("JSON serialization and deserialization might require types that cannot be statically analyzed. Use the overload that takes a JsonTypeInfo or JsonSerializerContext, or make sure all of the required types are preserved.")]
+        [global::System.Diagnostics.CodeAnalysis.RequiresDynamicCode("JSON serialization and deserialization might require types that cannot be statically analyzed and might need runtime code generation. Use System.Text.Json source generation for native AOT applications.")]
+#endif
+        public static global::System.Threading.Tasks.ValueTask<global::G.OneOf<T1, T2, T3>?> FromJsonStreamAsync(
+            global::System.IO.Stream jsonStream,
+            global::System.Text.Json.JsonSerializerOptions?
jsonSerializerOptions = null)
+        {
+            return global::System.Text.Json.JsonSerializer.DeserializeAsync<global::G.OneOf<T1, T2, T3>?>(
+                jsonStream,
+                jsonSerializerOptions);
+        }
+    }
+}
diff --git a/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#OneOf.3.g.verified.cs b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#OneOf.3.g.verified.cs
new file mode 100644
index 0000000000..e4627d03aa
--- /dev/null
+++ b/src/tests/AutoSDK.SnapshotTests/Snapshots/openai/SystemTextJson/_#OneOf.3.g.verified.cs
@@ -0,0 +1,265 @@
+//HintName: OneOf.3.g.cs
+
+#nullable enable
+
+namespace G
+{
+    /// <summary>
+    /// 
+    /// </summary>
+    public readonly partial struct OneOf<T1, T2, T3> : global::System.IEquatable<OneOf<T1, T2, T3>>
+    {
+        /// <summary>
+        /// 
+        /// </summary>
+#if NET6_0_OR_GREATER
+        public T1? Value1 { get; init; }
+#else
+        public T1? Value1 { get; }
+#endif
+
+        /// <summary>
+        /// 
+        /// </summary>
+#if NET6_0_OR_GREATER
+        [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Value1))]
+#endif
+        public bool IsValue1 => Value1 != null;
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public static implicit operator OneOf<T1, T2, T3>(T1 value) => new OneOf<T1, T2, T3>(value);
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public static implicit operator T1?(OneOf<T1, T2, T3> @this) => @this.Value1;
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public OneOf(T1? value)
+        {
+            Value1 = value;
+        }
+
+        /// <summary>
+        /// 
+        /// </summary>
+#if NET6_0_OR_GREATER
+        public T2? Value2 { get; init; }
+#else
+        public T2? Value2 { get; }
+#endif
+
+        /// <summary>
+        /// 
+        /// </summary>
+#if NET6_0_OR_GREATER
+        [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Value2))]
+#endif
+        public bool IsValue2 => Value2 != null;
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public static implicit operator OneOf<T1, T2, T3>(T2 value) => new OneOf<T1, T2, T3>(value);
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public static implicit operator T2?(OneOf<T1, T2, T3> @this) => @this.Value2;
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public OneOf(T2? value)
+        {
+            Value2 = value;
+        }
+
+        /// <summary>
+        /// 
+        /// </summary>
+#if NET6_0_OR_GREATER
+        public T3? Value3 { get; init; }
+#else
+        public T3? Value3 { get; }
+#endif
+
+        /// <summary>
+        /// 
+        /// </summary>
+#if NET6_0_OR_GREATER
+        [global::System.Diagnostics.CodeAnalysis.MemberNotNullWhen(true, nameof(Value3))]
+#endif
+        public bool IsValue3 => Value3 != null;
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public static implicit operator OneOf<T1, T2, T3>(T3 value) => new OneOf<T1, T2, T3>(value);
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public static implicit operator T3?(OneOf<T1, T2, T3> @this) => @this.Value3;
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public OneOf(T3? value)
+        {
+            Value3 = value;
+        }
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public OneOf(
+            T1? value1,
+            T2? value2,
+            T3? value3
+            )
+        {
+            Value1 = value1;
+            Value2 = value2;
+            Value3 = value3;
+        }
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public object? Object =>
+            Value3 as object ??
+            Value2 as object ??
+            Value1 as object
+            ;
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public bool Validate()
+        {
+            return IsValue1 && !IsValue2 && !IsValue3 || !IsValue1 && IsValue2 && !IsValue3 || !IsValue1 && !IsValue2 && IsValue3;
+        }
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public TResult? Match<TResult>(
+            global::System.Func<T1, TResult>? value1 = null,
+            global::System.Func<T2, TResult>? value2 = null,
+            global::System.Func<T3, TResult>? value3 = null,
+            bool validate = true)
+        {
+            if (validate)
+            {
+                Validate();
+            }
+
+            if (IsValue1 && value1 != null)
+            {
+                return value1(Value1!);
+            }
+            else if (IsValue2 && value2 != null)
+            {
+                return value2(Value2!);
+            }
+            else if (IsValue3 && value3 != null)
+            {
+                return value3(Value3!);
+            }
+
+            return default(TResult);
+        }
+
+        /// <summary>
+        /// 
+        /// </summary>
+        public void Match(
+            global::System.Action<T1>? value1 = null,
+            global::System.Action<T2>? value2 = null,
+            global::System.Action<T3>?
value3 = null, + bool validate = true) + { + if (validate) + { + Validate(); + } + + if (IsValue1) + { + value1?.Invoke(Value1!); + } + else if (IsValue2) + { + value2?.Invoke(Value2!); + } + else if (IsValue3) + { + value3?.Invoke(Value3!); + } + } + + /// + /// + /// + public override int GetHashCode() + { + var fields = new object?[] + { + Value1, + typeof(T1), + Value2, + typeof(T2), + Value3, + typeof(T3), + }; + const int offset = unchecked((int)2166136261); + const int prime = 16777619; + static int HashCodeAggregator(int hashCode, object? value) => value == null + ? (hashCode ^ 0) * prime + : (hashCode ^ value.GetHashCode()) * prime; + + return global::System.Linq.Enumerable.Aggregate(fields, offset, HashCodeAggregator); + } + + /// + /// + /// + public bool Equals(OneOf other) + { + return + global::System.Collections.Generic.EqualityComparer.Default.Equals(Value1, other.Value1) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Value2, other.Value2) && + global::System.Collections.Generic.EqualityComparer.Default.Equals(Value3, other.Value3) + ; + } + + /// + /// + /// + public static bool operator ==(OneOf obj1, OneOf obj2) + { + return global::System.Collections.Generic.EqualityComparer>.Default.Equals(obj1, obj2); + } + + /// + /// + /// + public static bool operator !=(OneOf obj1, OneOf obj2) + { + return !(obj1 == obj2); + } + + /// + /// + /// + public override bool Equals(object? obj) + { + return obj is OneOf o && Equals(o); + } + } +} diff --git a/src/tests/AutoSDK.UnitTests/Snapshots/OpenAi/ResolvedSchemas/_.verified.txt b/src/tests/AutoSDK.UnitTests/Snapshots/OpenAi/ResolvedSchemas/_.verified.txt index db25775ac5..73dbf034e9 100644 --- a/src/tests/AutoSDK.UnitTests/Snapshots/OpenAi/ResolvedSchemas/_.verified.txt +++ b/src/tests/AutoSDK.UnitTests/Snapshots/OpenAi/ResolvedSchemas/_.verified.txt @@ -1,1607 +1,1517 @@ [ + AddUploadPartRequest(class)[Uploads], + Data(byte[])[Uploads], + AssistantObject(class)[Assistants], + Id(string)[Assistants], + AssistantObjectObject(enum)[Assistants], + CreatedAt(int)[Assistants], + Name(string)[Assistants], + Description(string)[Assistants], + Model(string)[Assistants], + Instructions(string)[Assistants], + Tools(array)[Assistants], + ToolsItem(oneOf)[Assistants], + AssistantToolsCode(ref)[Assistants], + AssistantToolsFileSearch(ref)[Assistants], + AssistantToolsFunction(ref)[Assistants], + AssistantObjectToolDiscriminator(class)[Assistants], + AssistantObjectToolDiscriminatorType(enum)[Assistants], + AssistantObjectToolResources(class)[Assistants], + AssistantObjectToolResourcesCodeInterpreter(class)[Assistants], + FileIds(array)[Assistants], + FileIdsItem(string)[Assistants], + AssistantObjectToolResourcesFileSearch(class)[Assistants], + VectorStoreIds(array)[Assistants], + VectorStoreIdsItem(string)[Assistants], + AssistantObjectMetadata(class)[Assistants], + Temperature(double)[Assistants], + TopP(double)[Assistants], + AssistantsApiResponseFormatOption(ref)[Assistants], + AssistantToolsCode(class)[Assistants], + AssistantToolsCodeType(enum)[Assistants], + AssistantToolsFileSearch(class)[Assistants], + AssistantToolsFileSearchType(enum)[Assistants], + AssistantToolsFileSearchFileSearch(class)[Assistants], + MaxNumResults(int)[Assistants], + FileSearchRankingOptions(ref)[Assistants], + FileSearchRankingOptions(class)[Assistants], + FileSearchRankingOptionsRanker(enum)[Assistants], + ScoreThreshold(double)[Assistants], + AssistantToolsFunction(class)[Assistants], + AssistantToolsFunctionType(enum)[Assistants], + 
FunctionObject(ref)[Assistants], + FunctionObject(class)[Assistants, Chat], + Description(string)[Assistants, Chat], + Name(string)[Assistants, Chat], + FunctionParameters(ref)[Assistants, Chat], + Strict(bool)[Assistants, Chat], + FunctionParameters(class)[Assistants, Chat], + AssistantsApiResponseFormatOption(oneOf)[Assistants], + AssistantsApiResponseFormatOptionEnum(enum)[Assistants], + ResponseFormatText(ref)[Assistants], + ResponseFormatJsonObject(ref)[Assistants], + ResponseFormatJsonSchema(ref)[Assistants], + ResponseFormatText(class)[Assistants, Chat], + ResponseFormatTextType(enum)[Assistants, Chat], + ResponseFormatJsonObject(class)[Assistants, Chat], + ResponseFormatJsonObjectType(enum)[Assistants, Chat], + ResponseFormatJsonSchema(class)[Assistants, Chat], + ResponseFormatJsonSchemaType(enum)[Assistants, Chat], + ResponseFormatJsonSchemaJsonSchema(class)[Assistants, Chat], + Description(string)[Assistants, Chat], + Name(string)[Assistants, Chat], + ResponseFormatJsonSchemaSchema(ref)[Assistants, Chat], + Strict(bool)[Assistants, Chat], + ResponseFormatJsonSchemaSchema(class)[Assistants, Chat], + AssistantStreamEvent(oneOf)[], + ErrorEvent(ref)[], + DoneEvent(ref)[], + AssistantStreamEventVariant3(class)[], + Enabled(bool)[], + AssistantStreamEventVariant3Event(enum)[], + ThreadObject(ref)[], + AssistantStreamEventVariant4(class)[], + AssistantStreamEventVariant4Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant5(class)[], + AssistantStreamEventVariant5Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant6(class)[], + AssistantStreamEventVariant6Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant7(class)[], + AssistantStreamEventVariant7Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant8(class)[], + AssistantStreamEventVariant8Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant9(class)[], + AssistantStreamEventVariant9Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant10(class)[], + AssistantStreamEventVariant10Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant11(class)[], + AssistantStreamEventVariant11Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant12(class)[], + AssistantStreamEventVariant12Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant13(class)[], + AssistantStreamEventVariant13Event(enum)[], + RunObject(ref)[], + AssistantStreamEventVariant14(class)[], + AssistantStreamEventVariant14Event(enum)[], + RunStepObject(ref)[], + AssistantStreamEventVariant15(class)[], + AssistantStreamEventVariant15Event(enum)[], + RunStepObject(ref)[], + AssistantStreamEventVariant16(class)[], + AssistantStreamEventVariant16Event(enum)[], + RunStepDeltaObject(ref)[], + AssistantStreamEventVariant17(class)[], + AssistantStreamEventVariant17Event(enum)[], + RunStepObject(ref)[], + AssistantStreamEventVariant18(class)[], + AssistantStreamEventVariant18Event(enum)[], + RunStepObject(ref)[], + AssistantStreamEventVariant19(class)[], + AssistantStreamEventVariant19Event(enum)[], + RunStepObject(ref)[], + AssistantStreamEventVariant20(class)[], + AssistantStreamEventVariant20Event(enum)[], + RunStepObject(ref)[], + AssistantStreamEventVariant21(class)[], + AssistantStreamEventVariant21Event(enum)[], + MessageObject(ref)[], + AssistantStreamEventVariant22(class)[], + AssistantStreamEventVariant22Event(enum)[], + MessageObject(ref)[], + AssistantStreamEventVariant23(class)[], + AssistantStreamEventVariant23Event(enum)[], + MessageDeltaObject(ref)[], + 
AssistantStreamEventVariant24(class)[], + AssistantStreamEventVariant24Event(enum)[], + MessageObject(ref)[], + AssistantStreamEventVariant25(class)[], + AssistantStreamEventVariant25Event(enum)[], + MessageObject(ref)[], + AssistantStreamEventDiscriminator(class)[], + AssistantStreamEventDiscriminatorEvent(enum)[], + ErrorEvent(class)[], + ErrorEventEvent(enum)[], + Error(ref)[], Error(class)[Projects], Code(string)[Projects], Message(string)[Projects], Param(string)[Projects], Type(string)[Projects], - ErrorResponse(class)[Projects], - Error(ref)[Projects], - ListModelsResponse(class)[Models], - ListModelsResponseObject(enum)[Models], - Data(array)[Models], - Model12(ref)[Models], - Model12(class)[Models], - Id(string)[Models], - Created(int)[Models], - ModelObject(enum)[Models], - OwnedBy(string)[Models], - DeleteModelResponse(class)[Models], - Id(string)[Models], - Deleted(bool)[Models], - Object(string)[Models], - CreateCompletionRequest(class)[Completions], - Model(anyOf)[Completions], - ModelVariant1(string)[Completions], - CreateCompletionRequestModel(enum)[Completions], - Prompt(oneOf)[Completions], - PromptVariant1(string)[Completions], - PromptVariant2(array)[Completions], - PromptVariant2Item(string)[Completions], - PromptVariant3(array)[Completions], - PromptVariant3Item(int)[Completions], - PromptVariant4(array)[Completions], - PromptVariant4Item(array)[Completions], - PromptVariant4ItemItem(int)[Completions], - BestOf(int)[Completions], - Echo(bool)[Completions], - FrequencyPenalty(double)[Completions], - CreateCompletionRequestLogitBias(class)[Completions], - LogitBias(int)[Completions], - Logprobs(int)[Completions], - MaxTokens(int)[Completions], - N(int)[Completions], - PresencePenalty(double)[Completions], - Seed(int)[Completions], - Stop(oneOf)[Completions], - StopVariant1(string)[Completions], - StopVariant2(array)[Completions], - StopVariant2Item(string)[Completions], - Stream(bool)[Completions], - ChatCompletionStreamOptions(ref)[Completions], - Suffix(string)[Completions], - Temperature(double)[Completions], - TopP(double)[Completions], - User(string)[Completions], - ChatCompletionStreamOptions(class)[Chat, Completions], - IncludeUsage(bool)[Chat, Completions], - CreateCompletionResponse(class)[Completions], - Id(string)[Completions], - Choices(array)[Completions], - CreateCompletionResponseChoice(class)[Completions], - CreateCompletionResponseChoiceFinishReason(enum)[Completions], - Index(int)[Completions], - CreateCompletionResponseChoiceLogprobs(class)[Completions], - TextOffset(array)[Completions], - TextOffsetItem(int)[Completions], - TokenLogprobs(array)[Completions], - TokenLogprobsItem(double)[Completions], - Tokens(array)[Completions], - TokensItem(string)[Completions], - TopLogprobs(array)[Completions], - CreateCompletionResponseChoiceLogprobsTopLogprob(class)[Completions], - TopLogprobsItem(double)[Completions], - Text(string)[Completions], - Created(int)[Completions], - Model(string)[Completions], - SystemFingerprint(string)[Completions], - CreateCompletionResponseObject(enum)[Completions], - CompletionUsage(ref)[Completions], - CompletionUsage(class)[Chat, Completions], - CompletionTokens(int)[Chat, Completions], - PromptTokens(int)[Chat, Completions], - TotalTokens(int)[Chat, Completions], - ChatCompletionRequestMessageContentPartText(class)[Chat], - ChatCompletionRequestMessageContentPartTextType(enum)[Chat], - Text(string)[Chat], - ChatCompletionRequestMessageContentPartImage(class)[Chat], - 
ChatCompletionRequestMessageContentPartImageType(enum)[Chat], - ChatCompletionRequestMessageContentPartImageImageUrl(class)[Chat], - Url(Uri)[Chat], - ChatCompletionRequestMessageContentPartImageImageUrlDetail(enum)[Chat], - ChatCompletionRequestMessageContentPartRefusal(class)[Chat], - ChatCompletionRequestMessageContentPartRefusalType(enum)[Chat], - Refusal(string)[Chat], - ChatCompletionRequestMessage(oneOf)[Chat], - ChatCompletionRequestSystemMessage(ref)[Chat], - ChatCompletionRequestUserMessage(ref)[Chat], - ChatCompletionRequestAssistantMessage(ref)[Chat], - ChatCompletionRequestToolMessage(ref)[Chat], - ChatCompletionRequestFunctionMessage(ref)[Chat], - ChatCompletionRequestMessageDiscriminator(class)[Chat], - ChatCompletionRequestMessageDiscriminatorRole(enum)[Chat], - ChatCompletionRequestSystemMessage(class)[Chat], - Content(oneOf)[Chat], - ContentVariant1(string)[Chat], - ContentVariant2(array)[Chat], - ChatCompletionRequestSystemMessageContentPart(ref)[Chat], - ChatCompletionRequestSystemMessageRole(enum)[Chat], - Name(string)[Chat], - ChatCompletionRequestSystemMessageContentPart(oneOf)[Chat], - ChatCompletionRequestMessageContentPartText(ref)[Chat], - ChatCompletionRequestUserMessage(class)[Chat], - Content2(oneOf)[Chat], - ContentVariant1(string)[Chat], - ContentVariant2(array)[Chat], - ChatCompletionRequestUserMessageContentPart(ref)[Chat], - ChatCompletionRequestUserMessageRole(enum)[Chat], - Name(string)[Chat], - ChatCompletionRequestUserMessageContentPart(oneOf)[Chat], - ChatCompletionRequestMessageContentPartText(ref)[Chat], - ChatCompletionRequestMessageContentPartImage(ref)[Chat], - ChatCompletionRequestUserMessageContentPartDiscriminator(class)[Chat], - ChatCompletionRequestUserMessageContentPartDiscriminatorType(enum)[Chat], - ChatCompletionRequestAssistantMessage(class)[Chat], - Content3(oneOf)[Chat], - ContentVariant1(string)[Chat], - ContentVariant2(array)[Chat], - ChatCompletionRequestAssistantMessageContentPart(ref)[Chat], - Refusal(string)[Chat], - ChatCompletionRequestAssistantMessageRole(enum)[Chat], - Name(string)[Chat], - ChatCompletionMessageToolCalls(ref)[Chat], - ChatCompletionRequestAssistantMessageFunctionCall(class)[Chat], - Arguments(string)[Chat], - Name(string)[Chat], - ChatCompletionRequestAssistantMessageContentPart(oneOf)[Chat], - ChatCompletionRequestMessageContentPartText(ref)[Chat], - ChatCompletionRequestMessageContentPartRefusal(ref)[Chat], - ChatCompletionRequestAssistantMessageContentPartDiscriminator(class)[Chat], - ChatCompletionRequestAssistantMessageContentPartDiscriminatorType(enum)[Chat], - ChatCompletionMessageToolCalls(array)[Chat], - ChatCompletionMessageToolCall(ref)[Chat], - ChatCompletionMessageToolCall(class)[Chat], - Id(string)[Chat], - ChatCompletionMessageToolCallType(enum)[Chat], - ChatCompletionMessageToolCallFunction(class)[Chat], - Name(string)[Chat], - Arguments(string)[Chat], - ChatCompletionRequestToolMessage(class)[Chat], - ChatCompletionRequestToolMessageRole(enum)[Chat], - Content4(oneOf)[Chat], - ContentVariant1(string)[Chat], - ContentVariant2(array)[Chat], - ChatCompletionRequestToolMessageContentPart(ref)[Chat], - ToolCallId(string)[Chat], - ChatCompletionRequestToolMessageContentPart(oneOf)[Chat], - ChatCompletionRequestMessageContentPartText(ref)[Chat], - ChatCompletionRequestFunctionMessage(class)[Chat], - ChatCompletionRequestFunctionMessageRole(enum)[Chat], - Content(string)[Chat], - Name(string)[Chat], - FineTuneChatCompletionRequestAssistantMessage(allOf)[], - 
FineTuneChatCompletionRequestAssistantMessageVariant1(class)[], - Weight(int)[], - ChatCompletionRequestAssistantMessage(ref)[], - FunctionParameters(class)[Chat, Assistants], - ChatCompletionFunctions(class)[Chat], - Description(string)[Chat], - Name(string)[Chat], - FunctionParameters(ref)[Chat], - ChatCompletionFunctionCallOption(class)[Chat], - Name(string)[Chat], - ChatCompletionTool(class)[Chat], - ChatCompletionToolType(enum)[Chat], - FunctionObject(ref)[Chat], - FunctionObject(class)[Chat, Assistants], - Description(string)[Chat, Assistants], - Name(string)[Chat, Assistants], - FunctionParameters(ref)[Chat, Assistants], - Strict(bool)[Chat, Assistants], - ResponseFormatText(class)[Chat, Assistants], - ResponseFormatTextType(enum)[Chat, Assistants], - ResponseFormatJsonObject(class)[Chat, Assistants], - ResponseFormatJsonObjectType(enum)[Chat, Assistants], - ResponseFormatJsonSchemaSchema(class)[Chat, Assistants], - ResponseFormatJsonSchema(class)[Chat, Assistants], - ResponseFormatJsonSchemaType(enum)[Chat, Assistants], - ResponseFormatJsonSchemaJsonSchema(class)[Chat, Assistants], - Description(string)[Chat, Assistants], - Name(string)[Chat, Assistants], - ResponseFormatJsonSchemaSchema(ref)[Chat, Assistants], - Strict(bool)[Chat, Assistants], - ChatCompletionToolChoiceOption(oneOf)[Chat], - ChatCompletionToolChoiceOptionEnum(enum)[Chat], - ChatCompletionNamedToolChoice(ref)[Chat], - ChatCompletionNamedToolChoice(class)[Chat], - ChatCompletionNamedToolChoiceType(enum)[Chat], - ChatCompletionNamedToolChoiceFunction(class)[Chat], - Name(string)[Chat], + DoneEvent(class)[], + DoneEventEvent(enum)[], + DoneEventData(enum)[], + ThreadObject(class)[Assistants], + Id(string)[Assistants], + ThreadObjectObject(enum)[Assistants], + CreatedAt(int)[Assistants], + ThreadObjectToolResources(class)[Assistants], + ThreadObjectToolResourcesCodeInterpreter(class)[Assistants], + FileIds(array)[Assistants], + FileIdsItem(string)[Assistants], + ThreadObjectToolResourcesFileSearch(class)[Assistants], + VectorStoreIds(array)[Assistants], + VectorStoreIdsItem(string)[Assistants], + ThreadObjectMetadata(class)[Assistants], + RunObject(class)[Assistants], + Id(string)[Assistants], + RunObjectObject(enum)[Assistants], + CreatedAt(int)[Assistants], + ThreadId(string)[Assistants], + AssistantId(string)[Assistants], + RunObjectStatus(enum)[Assistants], + RunObjectRequiredAction(class)[Assistants], + RunObjectRequiredActionType(enum)[Assistants], + RunObjectRequiredActionSubmitToolOutputs(class)[Assistants], + ToolCalls(array)[Assistants], + RunToolCallObject(ref)[Assistants], + RunObjectLastError(class)[Assistants], + RunObjectLastErrorCode(enum)[Assistants], + Message(string)[Assistants], + ExpiresAt(int)[Assistants], + StartedAt(int)[Assistants], + CancelledAt(int)[Assistants], + FailedAt(int)[Assistants], + CompletedAt(int)[Assistants], + RunObjectIncompleteDetails(class)[Assistants], + RunObjectIncompleteDetailsReason(enum)[Assistants], + Model(string)[Assistants], + Instructions(string)[Assistants], + Tools(array)[Assistants], + ToolsItem8(oneOf)[Assistants], + AssistantToolsCode(ref)[Assistants], + AssistantToolsFileSearch(ref)[Assistants], + AssistantToolsFunction(ref)[Assistants], + RunObjectToolDiscriminator(class)[Assistants], + RunObjectToolDiscriminatorType(enum)[Assistants], + RunObjectMetadata(class)[Assistants], + RunCompletionUsage(ref)[Assistants], + Temperature(double)[Assistants], + TopP(double)[Assistants], + MaxPromptTokens(int)[Assistants], + MaxCompletionTokens(int)[Assistants], + 
TruncationObject(ref)[Assistants], + AssistantsApiToolChoiceOption(ref)[Assistants], + ParallelToolCalls(ref)[Assistants], + AssistantsApiResponseFormatOption(ref)[Assistants], + RunToolCallObject(class)[Assistants], + Id(string)[Assistants], + RunToolCallObjectType(enum)[Assistants], + RunToolCallObjectFunction(class)[Assistants], + Name(string)[Assistants], + Arguments(string)[Assistants], + RunCompletionUsage(class)[Assistants], + CompletionTokens(int)[Assistants], + PromptTokens(int)[Assistants], + TotalTokens(int)[Assistants], + TruncationObject(class)[Assistants], + TruncationObjectType(enum)[Assistants], + LastMessages(int)[Assistants], + AssistantsApiToolChoiceOption(oneOf)[Assistants], + AssistantsApiToolChoiceOptionEnum(enum)[Assistants], + AssistantsNamedToolChoice(ref)[Assistants], + AssistantsNamedToolChoice(class)[Assistants], + AssistantsNamedToolChoiceType(enum)[Assistants], + AssistantsNamedToolChoiceFunction(class)[Assistants], + Name(string)[Assistants], ParallelToolCalls(bool)[Chat, Assistants], - ChatCompletionMessageToolCallChunk(class)[], + RunStepObject(class)[Assistants], + Id(string)[Assistants], + RunStepObjectObject(enum)[Assistants], + CreatedAt(int)[Assistants], + AssistantId(string)[Assistants], + ThreadId(string)[Assistants], + RunId(string)[Assistants], + RunStepObjectType(enum)[Assistants], + RunStepObjectStatus(enum)[Assistants], + RunStepObjectStepDetails(class)[Assistants], + RunStepDetailsMessageCreationObject(ref)[Assistants], + RunStepDetailsToolCallsObject(ref)[Assistants], + RunStepObjectStepDetailsDiscriminator(class)[Assistants], + RunStepObjectStepDetailsDiscriminatorType(enum)[Assistants], + RunStepObjectLastError(class)[Assistants], + RunStepObjectLastErrorCode(enum)[Assistants], + Message(string)[Assistants], + ExpiredAt(int)[Assistants], + CancelledAt(int)[Assistants], + FailedAt(int)[Assistants], + CompletedAt(int)[Assistants], + RunStepObjectMetadata(class)[Assistants], + RunStepCompletionUsage(ref)[Assistants], + RunStepDetailsMessageCreationObject(class)[Assistants], + RunStepDetailsMessageCreationObjectType(enum)[Assistants], + RunStepDetailsMessageCreationObjectMessageCreation(class)[Assistants], + MessageId(string)[Assistants], + RunStepDetailsToolCallsObject(class)[Assistants], + RunStepDetailsToolCallsObjectType(enum)[Assistants], + ToolCalls(array)[Assistants], + ToolCallsItem2(oneOf)[Assistants], + RunStepDetailsToolCallsCodeObject(ref)[Assistants], + RunStepDetailsToolCallsFileSearchObject(ref)[Assistants], + RunStepDetailsToolCallsFunctionObject(ref)[Assistants], + RunStepDetailsToolCallsObjectToolCallDiscriminator(class)[Assistants], + RunStepDetailsToolCallsObjectToolCallDiscriminatorType(enum)[Assistants], + RunStepDetailsToolCallsCodeObject(class)[Assistants], + Id(string)[Assistants], + RunStepDetailsToolCallsCodeObjectType(enum)[Assistants], + RunStepDetailsToolCallsCodeObjectCodeInterpreter(class)[Assistants], + Input(string)[Assistants], + Outputs(array)[Assistants], + RunStepDetailsToolCallsCodeObjectCodeInterpreterOutput(class)[Assistants], + RunStepDetailsToolCallsCodeOutputLogsObject(ref)[Assistants], + RunStepDetailsToolCallsCodeOutputImageObject(ref)[Assistants], + RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator(class)[Assistants], + RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType(enum)[Assistants], + RunStepDetailsToolCallsCodeOutputLogsObject(class)[Assistants], + RunStepDetailsToolCallsCodeOutputLogsObjectType(enum)[Assistants], + Logs(string)[Assistants], + 
RunStepDetailsToolCallsCodeOutputImageObject(class)[Assistants], + RunStepDetailsToolCallsCodeOutputImageObjectType(enum)[Assistants], + RunStepDetailsToolCallsCodeOutputImageObjectImage(class)[Assistants], + FileId(string)[Assistants], + RunStepDetailsToolCallsFileSearchObject(class)[Assistants], + Id(string)[Assistants], + RunStepDetailsToolCallsFileSearchObjectType(enum)[Assistants], + RunStepDetailsToolCallsFileSearchObjectFileSearch(class)[Assistants], + RunStepDetailsToolCallsFileSearchRankingOptionsObject(ref)[Assistants], + Results(array)[Assistants], + RunStepDetailsToolCallsFileSearchResultObject(ref)[Assistants], + RunStepDetailsToolCallsFileSearchRankingOptionsObject(class)[Assistants], + RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker(enum)[Assistants], + ScoreThreshold(double)[Assistants], + RunStepDetailsToolCallsFileSearchResultObject(class)[Assistants], + FileId(string)[Assistants], + FileName(string)[Assistants], + Score(double)[Assistants], + Content(array)[Assistants], + RunStepDetailsToolCallsFileSearchResultObjectContentItem(class)[Assistants], + RunStepDetailsToolCallsFileSearchResultObjectContentItemType(enum)[Assistants], + Text(string)[Assistants], + RunStepDetailsToolCallsFunctionObject(class)[Assistants], + Id(string)[Assistants], + RunStepDetailsToolCallsFunctionObjectType(enum)[Assistants], + RunStepDetailsToolCallsFunctionObjectFunction(class)[Assistants], + Name(string)[Assistants], + Arguments(string)[Assistants], + Output(string)[Assistants], + RunStepCompletionUsage(class)[Assistants], + CompletionTokens(int)[Assistants], + PromptTokens(int)[Assistants], + TotalTokens(int)[Assistants], + RunStepDeltaObject(class)[], + Id(string)[], + RunStepDeltaObjectObject(enum)[], + RunStepDeltaObjectDelta(class)[], + RunStepDeltaObjectDeltaStepDetails(class)[], + RunStepDeltaStepDetailsMessageCreationObject(ref)[], + RunStepDeltaStepDetailsToolCallsObject(ref)[], + RunStepDeltaObjectDeltaStepDetailsDiscriminator(class)[], + RunStepDeltaObjectDeltaStepDetailsDiscriminatorType(enum)[], + RunStepDeltaStepDetailsMessageCreationObject(class)[], + RunStepDeltaStepDetailsMessageCreationObjectType(enum)[], + RunStepDeltaStepDetailsMessageCreationObjectMessageCreation(class)[], + MessageId(string)[], + RunStepDeltaStepDetailsToolCallsObject(class)[], + RunStepDeltaStepDetailsToolCallsObjectType(enum)[], + ToolCalls(array)[], + ToolCallsItem(oneOf)[], + RunStepDeltaStepDetailsToolCallsCodeObject(ref)[], + RunStepDeltaStepDetailsToolCallsFileSearchObject(ref)[], + RunStepDeltaStepDetailsToolCallsFunctionObject(ref)[], + RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator(class)[], + RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType(enum)[], + RunStepDeltaStepDetailsToolCallsCodeObject(class)[], Index(int)[], Id(string)[], - ChatCompletionMessageToolCallChunkType(enum)[], - ChatCompletionMessageToolCallChunkFunction(class)[], + RunStepDeltaStepDetailsToolCallsCodeObjectType(enum)[], + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter(class)[], + Input(string)[], + Outputs(array)[], + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutput(class)[], + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject(ref)[], + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject(ref)[], + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator(class)[], + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType(enum)[], + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject(class)[], + Index(int)[], 
+ RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectType(enum)[], + Logs(string)[], + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject(class)[], + Index(int)[], + RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectType(enum)[], + RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImage(class)[], + FileId(string)[], + RunStepDeltaStepDetailsToolCallsFileSearchObject(class)[], + Index(int)[], + Id(string)[], + RunStepDeltaStepDetailsToolCallsFileSearchObjectType(enum)[], + RunStepDeltaStepDetailsToolCallsFileSearchObjectFileSearch(class)[], + RunStepDeltaStepDetailsToolCallsFunctionObject(class)[], + Index(int)[], + Id(string)[], + RunStepDeltaStepDetailsToolCallsFunctionObjectType(enum)[], + RunStepDeltaStepDetailsToolCallsFunctionObjectFunction(class)[], Name(string)[], Arguments(string)[], - ChatCompletionRole(enum)[], - ChatCompletionResponseMessage(class)[Chat], - Content(string)[Chat], - Refusal(string)[Chat], - ChatCompletionMessageToolCalls(ref)[Chat], - ChatCompletionResponseMessageRole(enum)[Chat], - ChatCompletionResponseMessageFunctionCall(class)[Chat], - Arguments(string)[Chat], - Name(string)[Chat], - ChatCompletionStreamResponseDelta(class)[], - Content(string)[], - ChatCompletionStreamResponseDeltaFunctionCall(class)[], - Arguments(string)[], - Name(string)[], - ToolCalls(array)[], - ChatCompletionMessageToolCallChunk(ref)[], - ChatCompletionStreamResponseDeltaRole(enum)[], - Refusal(string)[], - CreateChatCompletionRequest(class)[Chat], - Messages(array)[Chat], - ChatCompletionRequestMessage(ref)[Chat], - Model2(anyOf)[Chat], - ModelVariant1(string)[Chat], - CreateChatCompletionRequestModel(enum)[Chat], - FrequencyPenalty(double)[Chat], - CreateChatCompletionRequestLogitBias(class)[Chat], - LogitBias(int)[Chat], - Logprobs(bool)[Chat], - TopLogprobs(int)[Chat], - MaxTokens(int)[Chat], - N(int)[Chat], - PresencePenalty(double)[Chat], - ResponseFormat(oneOf)[Chat], - ResponseFormatText(ref)[Chat], - ResponseFormatJsonObject(ref)[Chat], - ResponseFormatJsonSchema(ref)[Chat], - CreateChatCompletionRequestResponseFormatDiscriminator(class)[Chat], - CreateChatCompletionRequestResponseFormatDiscriminatorType(enum)[Chat], - Seed(int)[Chat], - CreateChatCompletionRequestServiceTier(enum)[Chat], - Stop2(oneOf)[Chat], - StopVariant1(string)[Chat], - StopVariant2(array)[Chat], - StopVariant2Item(string)[Chat], - Stream(bool)[Chat], - ChatCompletionStreamOptions(ref)[Chat], - Temperature(double)[Chat], - TopP(double)[Chat], - Tools(array)[Chat], - ChatCompletionTool(ref)[Chat], - ChatCompletionToolChoiceOption(ref)[Chat], - ParallelToolCalls(ref)[Chat], - User(string)[Chat], - FunctionCall(oneOf)[Chat], - CreateChatCompletionRequestFunctionCall(enum)[Chat], - ChatCompletionFunctionCallOption(ref)[Chat], - Functions(array)[Chat], - ChatCompletionFunctions(ref)[Chat], - CreateChatCompletionResponse(class)[Chat], - Id(string)[Chat], - Choices(array)[Chat], - CreateChatCompletionResponseChoice(class)[Chat], - CreateChatCompletionResponseChoiceFinishReason(enum)[Chat], - Index(int)[Chat], - ChatCompletionResponseMessage(ref)[Chat], - CreateChatCompletionResponseChoiceLogprobs(class)[Chat], - Content(array)[Chat], - ChatCompletionTokenLogprob(ref)[Chat], - Refusal(array)[Chat], - ChatCompletionTokenLogprob(ref)[Chat], - Created(int)[Chat], - Model(string)[Chat], - CreateChatCompletionResponseServiceTier(enum)[Chat], - SystemFingerprint(string)[Chat], - CreateChatCompletionResponseObject(enum)[Chat], - CompletionUsage(ref)[Chat], - ChatCompletionTokenLogprob(class)[Chat], - 
Token(string)[Chat], - Logprob(double)[Chat], - Bytes(array)[Chat], - BytesItem(int)[Chat], - TopLogprobs(array)[Chat], - ChatCompletionTokenLogprobTopLogprob(class)[Chat], - Token(string)[Chat], - Logprob(double)[Chat], - Bytes(array)[Chat], - BytesItem(int)[Chat], - CreateChatCompletionFunctionResponse(class)[], - Id(string)[], - Choices(array)[], - CreateChatCompletionFunctionResponseChoice(class)[], - CreateChatCompletionFunctionResponseChoiceFinishReason(enum)[], - Index(int)[], - ChatCompletionResponseMessage(ref)[], - Created(int)[], - Model(string)[], - SystemFingerprint(string)[], - CreateChatCompletionFunctionResponseObject(enum)[], - CompletionUsage(ref)[], - ListPaginatedFineTuningJobsResponse(class)[Fine-tuning], - Data(array)[Fine-tuning], - FineTuningJob(ref)[Fine-tuning], - HasMore(bool)[Fine-tuning], - ListPaginatedFineTuningJobsResponseObject(enum)[Fine-tuning], - FineTuningJob(class)[Fine-tuning], - Id(string)[Fine-tuning], - CreatedAt(int)[Fine-tuning], - FineTuningJobError(class)[Fine-tuning], - Code(string)[Fine-tuning], - Message(string)[Fine-tuning], - Param(string)[Fine-tuning], - FineTunedModel(string)[Fine-tuning], - FinishedAt(int)[Fine-tuning], - FineTuningJobHyperparameters(class)[Fine-tuning], - NEpochs2(oneOf)[Fine-tuning], - FineTuningJobHyperparametersNEpochs(enum)[Fine-tuning], - NEpochsVariant2(int)[Fine-tuning], - Model(string)[Fine-tuning], - FineTuningJobObject(enum)[Fine-tuning], - OrganizationId(string)[Fine-tuning], - ResultFiles(array)[Fine-tuning], - ResultFilesItem(string)[Fine-tuning], - FineTuningJobStatus(enum)[Fine-tuning], - TrainedTokens(int)[Fine-tuning], - TrainingFile(string)[Fine-tuning], - ValidationFile(string)[Fine-tuning], - Integrations(array)[Fine-tuning], - IntegrationsItem(oneOf)[Fine-tuning], - FineTuningIntegration(ref)[Fine-tuning], - Seed(int)[Fine-tuning], - EstimatedFinish(int)[Fine-tuning], - FineTuningIntegration(class)[Fine-tuning], - FineTuningIntegrationType(enum)[Fine-tuning], - FineTuningIntegrationWandb(class)[Fine-tuning], - Project(string)[Fine-tuning], - Name(string)[Fine-tuning], - Entity(string)[Fine-tuning], - Tags(array)[Fine-tuning], - TagsItem(string)[Fine-tuning], - CreateChatCompletionStreamResponse(class)[], + Output(string)[], + MessageObject(class)[Assistants], + Id(string)[Assistants], + MessageObjectObject(enum)[Assistants], + CreatedAt(int)[Assistants], + ThreadId(string)[Assistants], + MessageObjectStatus(enum)[Assistants], + MessageObjectIncompleteDetails(class)[Assistants], + MessageObjectIncompleteDetailsReason(enum)[Assistants], + CompletedAt(int)[Assistants], + IncompleteAt(int)[Assistants], + MessageObjectRole(enum)[Assistants], + Content(array)[Assistants], + ContentItem2(oneOf)[Assistants], + MessageContentImageFileObject(ref)[Assistants], + MessageContentImageUrlObject(ref)[Assistants], + MessageContentTextObject(ref)[Assistants], + MessageContentRefusalObject(ref)[Assistants], + MessageObjectContentItemDiscriminator(class)[Assistants], + MessageObjectContentItemDiscriminatorType(enum)[Assistants], + AssistantId(string)[Assistants], + RunId(string)[Assistants], + Attachments(array)[Assistants], + MessageObjectAttachment(class)[Assistants], + FileId(string)[Assistants], + Tools(array)[Assistants], + ToolsItem6(oneOf)[Assistants], + AssistantToolsCode(ref)[Assistants], + AssistantToolsFileSearchTypeOnly(ref)[Assistants], + MessageObjectAttachmentToolDiscriminator(class)[Assistants], + MessageObjectAttachmentToolDiscriminatorType(enum)[Assistants], + 
MessageObjectMetadata(class)[Assistants], + MessageContentImageFileObject(class)[Assistants], + MessageContentImageFileObjectType(enum)[Assistants], + MessageContentImageFileObjectImageFile(class)[Assistants], + FileId(string)[Assistants], + MessageContentImageFileObjectImageFileDetail(enum)[Assistants], + MessageContentImageUrlObject(class)[Assistants], + MessageContentImageUrlObjectType(enum)[Assistants], + MessageContentImageUrlObjectImageUrl(class)[Assistants], + Url(Uri)[Assistants], + MessageContentImageUrlObjectImageUrlDetail(enum)[Assistants], + MessageContentTextObject(class)[Assistants], + MessageContentTextObjectType(enum)[Assistants], + MessageContentTextObjectText(class)[Assistants], + Value(string)[Assistants], + Annotations(array)[Assistants], + AnnotationsItem(oneOf)[Assistants], + MessageContentTextAnnotationsFileCitationObject(ref)[Assistants], + MessageContentTextAnnotationsFilePathObject(ref)[Assistants], + MessageContentTextObjectTextAnnotationDiscriminator(class)[Assistants], + MessageContentTextObjectTextAnnotationDiscriminatorType(enum)[Assistants], + MessageContentTextAnnotationsFileCitationObject(class)[Assistants], + MessageContentTextAnnotationsFileCitationObjectType(enum)[Assistants], + Text(string)[Assistants], + MessageContentTextAnnotationsFileCitationObjectFileCitation(class)[Assistants], + FileId(string)[Assistants], + StartIndex(int)[Assistants], + EndIndex(int)[Assistants], + MessageContentTextAnnotationsFilePathObject(class)[Assistants], + MessageContentTextAnnotationsFilePathObjectType(enum)[Assistants], + Text(string)[Assistants], + MessageContentTextAnnotationsFilePathObjectFilePath(class)[Assistants], + FileId(string)[Assistants], + StartIndex(int)[Assistants], + EndIndex(int)[Assistants], + MessageContentRefusalObject(class)[Assistants], + MessageContentRefusalObjectType(enum)[Assistants], + Refusal(string)[Assistants], + AssistantToolsFileSearchTypeOnly(class)[Assistants], + AssistantToolsFileSearchTypeOnlyType(enum)[Assistants], + MessageDeltaObject(class)[], Id(string)[], - Choices(array)[], - CreateChatCompletionStreamResponseChoice(class)[], - ChatCompletionStreamResponseDelta(ref)[], - CreateChatCompletionStreamResponseChoiceLogprobs(class)[], - Content(array)[], - ChatCompletionTokenLogprob(ref)[], - Refusal(array)[], - ChatCompletionTokenLogprob(ref)[], - CreateChatCompletionStreamResponseChoiceFinishReason(enum)[], - Index(int)[], - Created(int)[], - Model(string)[], - CreateChatCompletionStreamResponseServiceTier(enum)[], - SystemFingerprint(string)[], - CreateChatCompletionStreamResponseObject(enum)[], - CreateChatCompletionStreamResponseUsage(class)[], - CompletionTokens(int)[], - PromptTokens(int)[], - TotalTokens(int)[], - CreateChatCompletionImageResponse(class)[], - CreateImageRequest(class)[Images], - Prompt(string)[Images], - Model3(anyOf)[Images], - ModelVariant1(string)[Images], - CreateImageRequestModel(enum)[Images], - N(int)[Images], - CreateImageRequestQuality(enum)[Images], - CreateImageRequestResponseFormat(enum)[Images], - CreateImageRequestSize(enum)[Images], - CreateImageRequestStyle(enum)[Images], - User(string)[Images], - ImagesResponse(class)[Images], - Created(int)[Images], - Data(array)[Images], - Image(ref)[Images], - Image(class)[Images], - B64Json(string)[Images], - Url(string)[Images], - RevisedPrompt(string)[Images], - CreateImageEditRequest(class)[Images], - Image(byte[])[Images], - Prompt(string)[Images], - Mask(byte[])[Images], - Model4(anyOf)[Images], - ModelVariant1(string)[Images], - 
CreateImageEditRequestModel(enum)[Images], - N(int)[Images], - CreateImageEditRequestSize(enum)[Images], - CreateImageEditRequestResponseFormat(enum)[Images], - User(string)[Images], - CreateImageVariationRequest(class)[Images], - Image(byte[])[Images], - Model5(anyOf)[Images], - ModelVariant1(string)[Images], - CreateImageVariationRequestModel(enum)[Images], - N(int)[Images], - CreateImageVariationRequestResponseFormat(enum)[Images], - CreateImageVariationRequestSize(enum)[Images], - User(string)[Images], - CreateModerationRequest(class)[Moderations], - Input(oneOf)[Moderations], - InputVariant1(string)[Moderations], - InputVariant2(array)[Moderations], - InputVariant2Item(string)[Moderations], - Model6(anyOf)[Moderations], - ModelVariant1(string)[Moderations], - CreateModerationRequestModel(enum)[Moderations], - CreateModerationResponse(class)[Moderations], - Id(string)[Moderations], - Model(string)[Moderations], - Results(array)[Moderations], - CreateModerationResponseResult(class)[Moderations], - Flagged(bool)[Moderations], - CreateModerationResponseResultCategories(class)[Moderations], - Hate(bool)[Moderations], - HateThreatening(bool)[Moderations], - Harassment(bool)[Moderations], - HarassmentThreatening(bool)[Moderations], - SelfHarm(bool)[Moderations], - SelfHarmIntent(bool)[Moderations], - SelfHarmInstructions(bool)[Moderations], - Sexual(bool)[Moderations], - SexualMinors(bool)[Moderations], - Violence(bool)[Moderations], - ViolenceGraphic(bool)[Moderations], - CreateModerationResponseResultCategoryScores(class)[Moderations], - Hate(double)[Moderations], - HateThreatening(double)[Moderations], - Harassment(double)[Moderations], - HarassmentThreatening(double)[Moderations], - SelfHarm(double)[Moderations], - SelfHarmIntent(double)[Moderations], - SelfHarmInstructions(double)[Moderations], - Sexual(double)[Moderations], - SexualMinors(double)[Moderations], - Violence(double)[Moderations], - ViolenceGraphic(double)[Moderations], - ListFilesResponse(class)[Files], - Data(array)[Files], - OpenAIFile(ref)[Files], - ListFilesResponseObject(enum)[Files], - OpenAIFile(class)[Files, Uploads], - Id(string)[Files, Uploads], - Bytes(int)[Files, Uploads], - CreatedAt(int)[Files, Uploads], - Filename(string)[Files, Uploads], - OpenAIFileObject(enum)[Files, Uploads], - OpenAIFilePurpose(enum)[Files, Uploads], - OpenAIFileStatus(enum)[Files, Uploads], - StatusDetails(string)[Files, Uploads], - CreateFileRequest(class)[Files], - File(byte[])[Files], - CreateFileRequestPurpose(enum)[Files], - DeleteFileResponse(class)[Files], - Id(string)[Files], - DeleteFileResponseObject(enum)[Files], - Deleted(bool)[Files], - CreateUploadRequest(class)[Uploads], - Filename(string)[Uploads], - CreateUploadRequestPurpose(enum)[Uploads], - Bytes(int)[Uploads], - MimeType(string)[Uploads], - AddUploadPartRequest(class)[Uploads], - Data(byte[])[Uploads], - CompleteUploadRequest(class)[Uploads], - PartIds(array)[Uploads], - PartIdsItem(string)[Uploads], - Md5(string)[Uploads], + MessageDeltaObjectObject(enum)[], + MessageDeltaObjectDelta(class)[], + MessageDeltaObjectDeltaRole(enum)[], + Content(array)[], + ContentItem(oneOf)[], + MessageDeltaContentImageFileObject(ref)[], + MessageDeltaContentTextObject(ref)[], + MessageDeltaContentRefusalObject(ref)[], + MessageDeltaContentImageUrlObject(ref)[], + MessageDeltaObjectDeltaContentItemDiscriminator(class)[], + MessageDeltaObjectDeltaContentItemDiscriminatorType(enum)[], + MessageDeltaContentImageFileObject(class)[], + Index(int)[], + 
MessageDeltaContentImageFileObjectType(enum)[], + MessageDeltaContentImageFileObjectImageFile(class)[], + FileId(string)[], + MessageDeltaContentImageFileObjectImageFileDetail(enum)[], + MessageDeltaContentTextObject(class)[], + Index(int)[], + MessageDeltaContentTextObjectType(enum)[], + MessageDeltaContentTextObjectText(class)[], + Value(string)[], + Annotations(array)[], + AnnotationsItem2(oneOf)[], + MessageDeltaContentTextAnnotationsFileCitationObject(ref)[], + MessageDeltaContentTextAnnotationsFilePathObject(ref)[], + MessageDeltaContentTextObjectTextAnnotationDiscriminator(class)[], + MessageDeltaContentTextObjectTextAnnotationDiscriminatorType(enum)[], + MessageDeltaContentTextAnnotationsFileCitationObject(class)[], + Index(int)[], + MessageDeltaContentTextAnnotationsFileCitationObjectType(enum)[], + Text(string)[], + MessageDeltaContentTextAnnotationsFileCitationObjectFileCitation(class)[], + FileId(string)[], + Quote(string)[], + StartIndex(int)[], + EndIndex(int)[], + MessageDeltaContentTextAnnotationsFilePathObject(class)[], + Index(int)[], + MessageDeltaContentTextAnnotationsFilePathObjectType(enum)[], + Text(string)[], + MessageDeltaContentTextAnnotationsFilePathObjectFilePath(class)[], + FileId(string)[], + StartIndex(int)[], + EndIndex(int)[], + MessageDeltaContentRefusalObject(class)[], + Index(int)[], + MessageDeltaContentRefusalObjectType(enum)[], + Refusal(string)[], + MessageDeltaContentImageUrlObject(class)[], + Index(int)[], + MessageDeltaContentImageUrlObjectType(enum)[], + MessageDeltaContentImageUrlObjectImageUrl(class)[], + Url(string)[], + MessageDeltaContentImageUrlObjectImageUrlDetail(enum)[], + AudioResponseFormat(enum)[Audio], + AuditLog(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogEventType(ref)[Audit Logs], + EffectiveAt(int)[Audit Logs], + AuditLogProject(class)[Audit Logs], + Id(string)[Audit Logs], + Name(string)[Audit Logs], + AuditLogActor(ref)[Audit Logs], + AuditLogApiKeyCreated(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogApiKeyCreatedData(class)[Audit Logs], + Scopes(array)[Audit Logs], + ScopesItem(string)[Audit Logs], + AuditLogApiKeyUpdated(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogApiKeyUpdatedChangesRequested(class)[Audit Logs], + Scopes(array)[Audit Logs], + ScopesItem(string)[Audit Logs], + AuditLogApiKeyDeleted(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogInviteSent(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogInviteSentData(class)[Audit Logs], + Email(string)[Audit Logs], + Role(string)[Audit Logs], + AuditLogInviteAccepted(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogInviteDeleted(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogLoginFailed(class)[Audit Logs], + ErrorCode(string)[Audit Logs], + ErrorMessage(string)[Audit Logs], + AuditLogLogoutFailed(class)[Audit Logs], + ErrorCode(string)[Audit Logs], + ErrorMessage(string)[Audit Logs], + AuditLogOrganizationUpdated(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogOrganizationUpdatedChangesRequested(class)[Audit Logs], + Title(string)[Audit Logs], + Description(string)[Audit Logs], + Name(string)[Audit Logs], + AuditLogOrganizationUpdatedChangesRequestedSettings(class)[Audit Logs], + ThreadsUiVisibility(string)[Audit Logs], + UsageDashboardVisibility(string)[Audit Logs], + AuditLogProjectCreated(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogProjectCreatedData(class)[Audit Logs], + Name(string)[Audit Logs], + Title(string)[Audit Logs], + AuditLogProjectUpdated(class)[Audit Logs], + 
Id(string)[Audit Logs], + AuditLogProjectUpdatedChangesRequested(class)[Audit Logs], + Title(string)[Audit Logs], + AuditLogProjectArchived(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogRateLimitUpdated(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogRateLimitUpdatedChangesRequested(class)[Audit Logs], + MaxRequestsPer1Minute(int)[Audit Logs], + MaxTokensPer1Minute(int)[Audit Logs], + MaxImagesPer1Minute(int)[Audit Logs], + MaxAudioMegabytesPer1Minute(int)[Audit Logs], + MaxRequestsPer1Day(int)[Audit Logs], + Batch1DayMaxInputTokens(int)[Audit Logs], + AuditLogRateLimitDeleted(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogServiceAccountCreated(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogServiceAccountCreatedData(class)[Audit Logs], + Role(string)[Audit Logs], + AuditLogServiceAccountUpdated(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogServiceAccountUpdatedChangesRequested(class)[Audit Logs], + Role(string)[Audit Logs], + AuditLogServiceAccountDeleted(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogUserAdded(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogUserAddedData(class)[Audit Logs], + Role(string)[Audit Logs], + AuditLogUserUpdated(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogUserUpdatedChangesRequested(class)[Audit Logs], + Role(string)[Audit Logs], + AuditLogUserDeleted(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogEventType(enum)[Audit Logs], + AuditLogActor(class)[Audit Logs], + AuditLogActorType(enum)[Audit Logs], + AuditLogActorSession(ref)[Audit Logs], + AuditLogActorApiKey(ref)[Audit Logs], + AuditLogActorSession(class)[Audit Logs], + AuditLogActorUser(ref)[Audit Logs], + IpAddress(string)[Audit Logs], + AuditLogActorUser(class)[Audit Logs], + Id(string)[Audit Logs], + Email(string)[Audit Logs], + AuditLogActorApiKey(class)[Audit Logs], + Id(string)[Audit Logs], + AuditLogActorApiKeyType(enum)[Audit Logs], + AuditLogActorUser(ref)[Audit Logs], + AuditLogActorServiceAccount(ref)[Audit Logs], + AuditLogActorServiceAccount(class)[Audit Logs], + Id(string)[Audit Logs], + AutoChunkingStrategyRequestParam(class)[Vector stores], + AutoChunkingStrategyRequestParamType(enum)[Vector stores], + Batch(class)[Batch], + Id(string)[Batch], + BatchObject(enum)[Batch], + Endpoint(string)[Batch], + BatchErrors(class)[Batch], + Object(string)[Batch], + Data(array)[Batch], + BatchErrorsDataItem(class)[Batch], + Code(string)[Batch], + Message(string)[Batch], + Param(string)[Batch], + Line(int)[Batch], + InputFileId(string)[Batch], + CompletionWindow(string)[Batch], + BatchStatus(enum)[Batch], + OutputFileId(string)[Batch], + ErrorFileId(string)[Batch], + CreatedAt(int)[Batch], + InProgressAt(int)[Batch], + ExpiresAt(int)[Batch], + FinalizingAt(int)[Batch], + CompletedAt(int)[Batch], + FailedAt(int)[Batch], + ExpiredAt(int)[Batch], + CancellingAt(int)[Batch], + CancelledAt(int)[Batch], + BatchRequestCounts(class)[Batch], + Total(int)[Batch], + Completed(int)[Batch], + Failed(int)[Batch], + BatchMetadata(class)[Batch], + BatchRequestInput(class)[], + CustomId(string)[], + BatchRequestInputMethod(enum)[], + Url(string)[], + BatchRequestOutput(class)[], + Id(string)[], + CustomId(string)[], + BatchRequestOutputResponse(class)[], + StatusCode(int)[], + RequestId(string)[], + BatchRequestOutputResponseBody(class)[], + BatchRequestOutputError(class)[], + Code(string)[], + Message(string)[], CancelUploadRequest(class)[], - CreateFineTuningJobRequest(class)[Fine-tuning], - Model7(anyOf)[Fine-tuning], - 
ModelVariant1(string)[Fine-tuning], - CreateFineTuningJobRequestModel(enum)[Fine-tuning], - TrainingFile(string)[Fine-tuning], - CreateFineTuningJobRequestHyperparameters(class)[Fine-tuning], - BatchSize(oneOf)[Fine-tuning], - CreateFineTuningJobRequestHyperparametersBatchSize(enum)[Fine-tuning], - BatchSizeVariant2(int)[Fine-tuning], - LearningRateMultiplier(oneOf)[Fine-tuning], - CreateFineTuningJobRequestHyperparametersLearningRateMultiplier(enum)[Fine-tuning], - LearningRateMultiplierVariant2(double)[Fine-tuning], - NEpochs(oneOf)[Fine-tuning], - CreateFineTuningJobRequestHyperparametersNEpochs(enum)[Fine-tuning], - NEpochsVariant2(int)[Fine-tuning], - Suffix(string)[Fine-tuning], - ValidationFile(string)[Fine-tuning], - Integrations(array)[Fine-tuning], - CreateFineTuningJobRequestIntegration(class)[Fine-tuning], - Type_AllOf1Wrapped(oneOf)[Fine-tuning], - CreateFineTuningJobRequestIntegrationType(enum)[Fine-tuning], - CreateFineTuningJobRequestIntegrationWandb(class)[Fine-tuning], - Project(string)[Fine-tuning], - Name(string)[Fine-tuning], - Entity(string)[Fine-tuning], - Tags(array)[Fine-tuning], - TagsItem(string)[Fine-tuning], - Seed(int)[Fine-tuning], - ListFineTuningJobEventsResponse(class)[Fine-tuning], - Data(array)[Fine-tuning], - FineTuningJobEvent(ref)[Fine-tuning], - ListFineTuningJobEventsResponseObject(enum)[Fine-tuning], - FineTuningJobEvent(class)[Fine-tuning], - Id(string)[Fine-tuning], - CreatedAt(int)[Fine-tuning], - FineTuningJobEventLevel(enum)[Fine-tuning], - Message(string)[Fine-tuning], - FineTuningJobEventObject(enum)[Fine-tuning], - ListFineTuningJobCheckpointsResponse(class)[Fine-tuning], - Data(array)[Fine-tuning], - FineTuningJobCheckpoint(ref)[Fine-tuning], - ListFineTuningJobCheckpointsResponseObject(enum)[Fine-tuning], - FirstId(string)[Fine-tuning], - LastId(string)[Fine-tuning], - HasMore(bool)[Fine-tuning], - FineTuningJobCheckpoint(class)[Fine-tuning], - Id(string)[Fine-tuning], - CreatedAt(int)[Fine-tuning], - FineTunedModelCheckpoint(string)[Fine-tuning], - StepNumber(int)[Fine-tuning], - FineTuningJobCheckpointMetrics(class)[Fine-tuning], - Step(double)[Fine-tuning], - TrainLoss(double)[Fine-tuning], - TrainMeanTokenAccuracy(double)[Fine-tuning], - ValidLoss(double)[Fine-tuning], - ValidMeanTokenAccuracy(double)[Fine-tuning], - FullValidLoss(double)[Fine-tuning], - FullValidMeanTokenAccuracy(double)[Fine-tuning], - FineTuningJobId(string)[Fine-tuning], - FineTuningJobCheckpointObject(enum)[Fine-tuning], - CreateEmbeddingRequest(class)[Embeddings], - Input2(oneOf)[Embeddings], - InputVariant1(string)[Embeddings], - InputVariant2(array)[Embeddings], - InputVariant2Item(string)[Embeddings], - InputVariant3(array)[Embeddings], - InputVariant3Item(int)[Embeddings], - InputVariant4(array)[Embeddings], - InputVariant4Item(array)[Embeddings], - InputVariant4ItemItem(int)[Embeddings], - Model8(anyOf)[Embeddings], - ModelVariant1(string)[Embeddings], - CreateEmbeddingRequestModel(enum)[Embeddings], - CreateEmbeddingRequestEncodingFormat(enum)[Embeddings], - Dimensions(int)[Embeddings], - User(string)[Embeddings], - CreateEmbeddingResponse(class)[Embeddings], - Data(array)[Embeddings], - Embedding(ref)[Embeddings], - Model(string)[Embeddings], - CreateEmbeddingResponseObject(enum)[Embeddings], - CreateEmbeddingResponseUsage(class)[Embeddings], - PromptTokens(int)[Embeddings], - TotalTokens(int)[Embeddings], - Embedding(class)[Embeddings], - Index(int)[Embeddings], - Embedding1(array)[Embeddings], - Embedding1Item(double)[Embeddings], - 
EmbeddingObject(enum)[Embeddings], - CreateTranscriptionRequest(class)[Audio], - File(byte[])[Audio], - Model9(anyOf)[Audio], - ModelVariant1(string)[Audio], - CreateTranscriptionRequestModel(enum)[Audio], - Language(string)[Audio], - Prompt(string)[Audio], - CreateTranscriptionRequestResponseFormat(enum)[Audio], - Temperature(double)[Audio], - TimestampGranularities(array)[Audio], - CreateTranscriptionRequestTimestampGranularitie(enum)[Audio], - CreateTranscriptionResponseJson(class)[Audio], - Text(string)[Audio], - TranscriptionSegment(class)[Audio], - Id(int)[Audio], - Seek(int)[Audio], - Start(float)[Audio], - End(float)[Audio], - Text(string)[Audio], - Tokens(array)[Audio], - TokensItem(int)[Audio], - Temperature(float)[Audio], - AvgLogprob(float)[Audio], - CompressionRatio(float)[Audio], - NoSpeechProb(float)[Audio], - TranscriptionWord(class)[Audio], - Word(string)[Audio], - Start(float)[Audio], - End(float)[Audio], - CreateTranscriptionResponseVerboseJson(class)[Audio], - Language(string)[Audio], - Duration(string)[Audio], - Text(string)[Audio], - Words(array)[Audio], - TranscriptionWord(ref)[Audio], - Segments(array)[Audio], - TranscriptionSegment(ref)[Audio], - CreateTranslationRequest(class)[Audio], - File(byte[])[Audio], - Model10(anyOf)[Audio], - ModelVariant1(string)[Audio], - CreateTranslationRequestModel(enum)[Audio], - Prompt(string)[Audio], - ResponseFormat(string)[Audio], - Temperature(double)[Audio], - CreateTranslationResponseJson(class)[Audio], - Text(string)[Audio], - CreateTranslationResponseVerboseJson(class)[Audio], - Language(string)[Audio], - Duration(string)[Audio], - Text(string)[Audio], - Segments(array)[Audio], - TranscriptionSegment(ref)[Audio], - CreateSpeechRequest(class)[Audio], - Model11(anyOf)[Audio], - ModelVariant1(string)[Audio], - CreateSpeechRequestModel(enum)[Audio], - Input(string)[Audio], - CreateSpeechRequestVoice(enum)[Audio], - CreateSpeechRequestResponseFormat(enum)[Audio], - Speed(double)[Audio], - Upload(class)[Uploads], - Id(string)[Uploads], - CreatedAt(int)[Uploads], - Filename(string)[Uploads], - Bytes(int)[Uploads], - Purpose(string)[Uploads], - UploadStatus(enum)[Uploads], - ExpiresAt(int)[Uploads], - UploadObject(enum)[Uploads], - OpenAIFile(ref)[Uploads], - UploadPart(class)[Uploads], - Id(string)[Uploads], - CreatedAt(int)[Uploads], - UploadId(string)[Uploads], - UploadPartObject(enum)[Uploads], - FinetuneChatRequestInput(class)[], - Messages(array)[], - MessagesItem(oneOf)[], - ChatCompletionRequestSystemMessage(ref)[], - ChatCompletionRequestUserMessage(ref)[], - FineTuneChatCompletionRequestAssistantMessage(ref)[], - ChatCompletionRequestToolMessage(ref)[], - ChatCompletionRequestFunctionMessage(ref)[], - Tools(array)[], - ChatCompletionTool(ref)[], - ParallelToolCalls(ref)[], - Functions(array)[], - ChatCompletionFunctions(ref)[], - FinetuneCompletionRequestInput(class)[], - Prompt(string)[], - Completion(string)[], - RunCompletionUsage(class)[Assistants], - CompletionTokens(int)[Assistants], - PromptTokens(int)[Assistants], - TotalTokens(int)[Assistants], - RunStepCompletionUsage(class)[Assistants], - CompletionTokens(int)[Assistants], - PromptTokens(int)[Assistants], - TotalTokens(int)[Assistants], - AssistantsApiResponseFormatOption(oneOf)[Assistants], - AssistantsApiResponseFormatOptionEnum(enum)[Assistants], - ResponseFormatText(ref)[Assistants], - ResponseFormatJsonObject(ref)[Assistants], - ResponseFormatJsonSchema(ref)[Assistants], - AssistantObject(class)[Assistants], - Id(string)[Assistants], - 
AssistantObjectObject(enum)[Assistants], - CreatedAt(int)[Assistants], - Name(string)[Assistants], - Description(string)[Assistants], - Model(string)[Assistants], - Instructions(string)[Assistants], - Tools(array)[Assistants], - ToolsItem(oneOf)[Assistants], - AssistantToolsCode(ref)[Assistants], - AssistantToolsFileSearch(ref)[Assistants], - AssistantToolsFunction(ref)[Assistants], - AssistantObjectToolDiscriminator(class)[Assistants], - AssistantObjectToolDiscriminatorType(enum)[Assistants], - AssistantObjectToolResources(class)[Assistants], - AssistantObjectToolResourcesCodeInterpreter(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - AssistantObjectToolResourcesFileSearch(class)[Assistants], - VectorStoreIds(array)[Assistants], - VectorStoreIdsItem(string)[Assistants], - AssistantObjectMetadata(class)[Assistants], - Temperature(double)[Assistants], - TopP(double)[Assistants], - AssistantsApiResponseFormatOption(ref)[Assistants], - AssistantToolsCode(class)[Assistants], - AssistantToolsCodeType(enum)[Assistants], - AssistantToolsFileSearch(class)[Assistants], - AssistantToolsFileSearchType(enum)[Assistants], - AssistantToolsFileSearchFileSearch(class)[Assistants], - MaxNumResults(int)[Assistants], - AssistantToolsFunction(class)[Assistants], - AssistantToolsFunctionType(enum)[Assistants], - FunctionObject(ref)[Assistants], - CreateAssistantRequest(class)[Assistants], - Model13(anyOf)[Assistants], - ModelVariant1(string)[Assistants], - CreateAssistantRequestModel(enum)[Assistants], - Name(string)[Assistants], - Description(string)[Assistants], - Instructions(string)[Assistants], - Tools(array)[Assistants], - ToolsItem2(oneOf)[Assistants], - AssistantToolsCode(ref)[Assistants], - AssistantToolsFileSearch(ref)[Assistants], - AssistantToolsFunction(ref)[Assistants], - CreateAssistantRequestToolDiscriminator(class)[Assistants], - CreateAssistantRequestToolDiscriminatorType(enum)[Assistants], - CreateAssistantRequestToolResources(class)[Assistants], - CreateAssistantRequestToolResourcesCodeInterpreter(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - CreateAssistantRequestToolResourcesFileSearch(class)[Assistants], - VectorStoreIds(array)[Assistants], - VectorStoreIdsItem(string)[Assistants], - VectorStores(array)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStore(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategy(class)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1(class)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type(enum)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2(class)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type(enum)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static(class)[Assistants], - MaxChunkSizeTokens(int)[Assistants], - ChunkOverlapTokens(int)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator(class)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType(enum)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata(class)[Assistants], - 
CreateAssistantRequestToolResourcesFileSearchVariant1(class)[Assistants], - CreateAssistantRequestToolResourcesFileSearchVariant2(class)[Assistants], - CreateAssistantRequestMetadata(class)[Assistants], - Temperature(double)[Assistants], - TopP(double)[Assistants], - AssistantsApiResponseFormatOption(ref)[Assistants], - ModifyAssistantRequest(class)[Assistants], - Model_AllOf1Wrapped(anyOf)[Assistants], - ModelVariant1(string)[Assistants], - Name(string)[Assistants], - Description(string)[Assistants], - Instructions(string)[Assistants], - Tools(array)[Assistants], - ToolsItem3(oneOf)[Assistants], - AssistantToolsCode(ref)[Assistants], - AssistantToolsFileSearch(ref)[Assistants], - AssistantToolsFunction(ref)[Assistants], - ModifyAssistantRequestToolDiscriminator(class)[Assistants], - ModifyAssistantRequestToolDiscriminatorType(enum)[Assistants], - ModifyAssistantRequestToolResources(class)[Assistants], - ModifyAssistantRequestToolResourcesCodeInterpreter(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - ModifyAssistantRequestToolResourcesFileSearch(class)[Assistants], - VectorStoreIds(array)[Assistants], - VectorStoreIdsItem(string)[Assistants], - ModifyAssistantRequestMetadata(class)[Assistants], - Temperature(double)[Assistants], - TopP(double)[Assistants], - AssistantsApiResponseFormatOption(ref)[Assistants], - DeleteAssistantResponse(class)[Assistants], - Id(string)[Assistants], - Deleted(bool)[Assistants], - DeleteAssistantResponseObject(enum)[Assistants], - ListAssistantsResponse(class)[Assistants], - Object(string)[Assistants], - Data(array)[Assistants], - AssistantObject(ref)[Assistants], - FirstId(string)[Assistants], - LastId(string)[Assistants], - HasMore(bool)[Assistants], - AssistantToolsFileSearchTypeOnly(class)[Assistants], - AssistantToolsFileSearchTypeOnlyType(enum)[Assistants], - TruncationObject(class)[Assistants], - TruncationObjectType(enum)[Assistants], - LastMessages(int)[Assistants], - AssistantsApiToolChoiceOption(oneOf)[Assistants], - AssistantsApiToolChoiceOptionEnum(enum)[Assistants], - AssistantsNamedToolChoice(ref)[Assistants], - AssistantsNamedToolChoice(class)[Assistants], - AssistantsNamedToolChoiceType(enum)[Assistants], - AssistantsNamedToolChoiceFunction(class)[Assistants], - Name(string)[Assistants], - RunObject(class)[Assistants], - Id(string)[Assistants], - RunObjectObject(enum)[Assistants], - CreatedAt(int)[Assistants], - ThreadId(string)[Assistants], - AssistantId(string)[Assistants], - RunObjectStatus(enum)[Assistants], - RunObjectRequiredAction(class)[Assistants], - RunObjectRequiredActionType(enum)[Assistants], - RunObjectRequiredActionSubmitToolOutputs(class)[Assistants], - ToolCalls(array)[Assistants], - RunToolCallObject(ref)[Assistants], - RunObjectLastError(class)[Assistants], - RunObjectLastErrorCode(enum)[Assistants], - Message(string)[Assistants], - ExpiresAt(int)[Assistants], - StartedAt(int)[Assistants], - CancelledAt(int)[Assistants], - FailedAt(int)[Assistants], - CompletedAt(int)[Assistants], - RunObjectIncompleteDetails(class)[Assistants], - RunObjectIncompleteDetailsReason(enum)[Assistants], - Model(string)[Assistants], - Instructions(string)[Assistants], - Tools(array)[Assistants], - ToolsItem4(oneOf)[Assistants], - AssistantToolsCode(ref)[Assistants], - AssistantToolsFileSearch(ref)[Assistants], - AssistantToolsFunction(ref)[Assistants], - RunObjectToolDiscriminator(class)[Assistants], - RunObjectToolDiscriminatorType(enum)[Assistants], - RunObjectMetadata(class)[Assistants], - 
RunCompletionUsage(ref)[Assistants], - Temperature(double)[Assistants], - TopP(double)[Assistants], - MaxPromptTokens(int)[Assistants], - MaxCompletionTokens(int)[Assistants], - TruncationObject(ref)[Assistants], - AssistantsApiToolChoiceOption(ref)[Assistants], - ParallelToolCalls(ref)[Assistants], - AssistantsApiResponseFormatOption(ref)[Assistants], - RunToolCallObject(class)[Assistants], - Id(string)[Assistants], - RunToolCallObjectType(enum)[Assistants], - RunToolCallObjectFunction(class)[Assistants], - Name(string)[Assistants], - Arguments(string)[Assistants], - CreateRunRequest(class)[Assistants], - AssistantId(string)[Assistants], - Model14(anyOf)[Assistants], - ModelVariant1(string)[Assistants], - CreateRunRequestModel(enum)[Assistants], - Instructions(string)[Assistants], - AdditionalInstructions(string)[Assistants], - AdditionalMessages(array)[Assistants], - CreateMessageRequest(ref)[Assistants], - Tools(array)[Assistants], - ToolsItem5(oneOf)[Assistants], - AssistantToolsCode(ref)[Assistants], - AssistantToolsFileSearch(ref)[Assistants], - AssistantToolsFunction(ref)[Assistants], - CreateRunRequestToolDiscriminator(class)[Assistants], - CreateRunRequestToolDiscriminatorType(enum)[Assistants], - CreateRunRequestMetadata(class)[Assistants], - Temperature(double)[Assistants], - TopP(double)[Assistants], - Stream(bool)[Assistants], - MaxPromptTokens(int)[Assistants], - MaxCompletionTokens(int)[Assistants], - TruncationObject(ref)[Assistants], - AssistantsApiToolChoiceOption(ref)[Assistants], - ParallelToolCalls(ref)[Assistants], - AssistantsApiResponseFormatOption(ref)[Assistants], - CreateMessageRequest(class)[Assistants], - CreateMessageRequestRole(enum)[Assistants], - Content5(oneOf)[Assistants], - ContentVariant1(string)[Assistants], - ContentVariant2(array)[Assistants], - ContentVariant2Item(oneOf)[Assistants], - MessageContentImageFileObject(ref)[Assistants], - MessageContentImageUrlObject(ref)[Assistants], - MessageRequestContentTextObject(ref)[Assistants], - CreateMessageRequestContentVariant2ItemDiscriminator(class)[Assistants], - CreateMessageRequestContentVariant2ItemDiscriminatorType(enum)[Assistants], - Attachments(array)[Assistants], - CreateMessageRequestAttachment(class)[Assistants], - FileId(string)[Assistants], - Tools(array)[Assistants], - ToolsItem8(oneOf)[Assistants], - AssistantToolsCode(ref)[Assistants], - AssistantToolsFileSearchTypeOnly(ref)[Assistants], - CreateMessageRequestAttachmentToolDiscriminator(class)[Assistants], - CreateMessageRequestAttachmentToolDiscriminatorType(enum)[Assistants], - CreateMessageRequestMetadata(class)[Assistants], - MessageContentImageFileObject(class)[Assistants], - MessageContentImageFileObjectType(enum)[Assistants], - MessageContentImageFileObjectImageFile(class)[Assistants], - FileId(string)[Assistants], - MessageContentImageFileObjectImageFileDetail(enum)[Assistants], - MessageContentImageUrlObject(class)[Assistants], - MessageContentImageUrlObjectType(enum)[Assistants], - MessageContentImageUrlObjectImageUrl(class)[Assistants], - Url(Uri)[Assistants], - MessageContentImageUrlObjectImageUrlDetail(enum)[Assistants], - MessageRequestContentTextObject(class)[Assistants], - MessageRequestContentTextObjectType(enum)[Assistants], - Text(string)[Assistants], - ListRunsResponse(class)[Assistants], - Object(string)[Assistants], - Data(array)[Assistants], - RunObject(ref)[Assistants], - FirstId(string)[Assistants], - LastId(string)[Assistants], - HasMore(bool)[Assistants], - ModifyRunRequest(class)[Assistants], - 
ModifyRunRequestMetadata(class)[Assistants], - SubmitToolOutputsRunRequest(class)[Assistants], - ToolOutputs(array)[Assistants], - SubmitToolOutputsRunRequestToolOutput(class)[Assistants], - ToolCallId(string)[Assistants], - Output(string)[Assistants], - Stream(bool)[Assistants], - CreateThreadAndRunRequest(class)[Assistants], - AssistantId(string)[Assistants], - CreateThreadRequest(ref)[Assistants], - Model15(anyOf)[Assistants], - ModelVariant1(string)[Assistants], - CreateThreadAndRunRequestModel(enum)[Assistants], - Instructions(string)[Assistants], - Tools(array)[Assistants], - ToolsItem6(oneOf)[Assistants], - AssistantToolsCode(ref)[Assistants], - AssistantToolsFileSearch(ref)[Assistants], - AssistantToolsFunction(ref)[Assistants], - CreateThreadAndRunRequestToolDiscriminator(class)[Assistants], - CreateThreadAndRunRequestToolDiscriminatorType(enum)[Assistants], - CreateThreadAndRunRequestToolResources(class)[Assistants], - CreateThreadAndRunRequestToolResourcesCodeInterpreter(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - CreateThreadAndRunRequestToolResourcesFileSearch(class)[Assistants], - VectorStoreIds(array)[Assistants], - VectorStoreIdsItem(string)[Assistants], - CreateThreadAndRunRequestMetadata(class)[Assistants], - Temperature(double)[Assistants], - TopP(double)[Assistants], - Stream(bool)[Assistants], - MaxPromptTokens(int)[Assistants], - MaxCompletionTokens(int)[Assistants], - TruncationObject(ref)[Assistants], - AssistantsApiToolChoiceOption(ref)[Assistants], - ParallelToolCalls(ref)[Assistants], - AssistantsApiResponseFormatOption(ref)[Assistants], - CreateThreadRequest(class)[Assistants], - Messages(array)[Assistants], - CreateMessageRequest(ref)[Assistants], - CreateThreadRequestToolResources(class)[Assistants], - CreateThreadRequestToolResourcesCodeInterpreter(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - CreateThreadRequestToolResourcesFileSearch(class)[Assistants], - VectorStoreIds(array)[Assistants], - VectorStoreIdsItem(string)[Assistants], - VectorStores(array)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStore(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy(class)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1(class)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type(enum)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2(class)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type(enum)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static(class)[Assistants], - MaxChunkSizeTokens(int)[Assistants], - ChunkOverlapTokens(int)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator(class)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType(enum)[Assistants], - CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata(class)[Assistants], - CreateThreadRequestToolResourcesFileSearchVariant1(class)[Assistants], - CreateThreadRequestToolResourcesFileSearchVariant2(class)[Assistants], - CreateThreadRequestMetadata(class)[Assistants], - ThreadObject(class)[Assistants], - Id(string)[Assistants], - ThreadObjectObject(enum)[Assistants], - 
CreatedAt(int)[Assistants], - ThreadObjectToolResources(class)[Assistants], - ThreadObjectToolResourcesCodeInterpreter(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - ThreadObjectToolResourcesFileSearch(class)[Assistants], - VectorStoreIds(array)[Assistants], - VectorStoreIdsItem(string)[Assistants], - ThreadObjectMetadata(class)[Assistants], - ModifyThreadRequest(class)[Assistants], - ModifyThreadRequestToolResources(class)[Assistants], - ModifyThreadRequestToolResourcesCodeInterpreter(class)[Assistants], - FileIds(array)[Assistants], - FileIdsItem(string)[Assistants], - ModifyThreadRequestToolResourcesFileSearch(class)[Assistants], - VectorStoreIds(array)[Assistants], - VectorStoreIdsItem(string)[Assistants], - ModifyThreadRequestMetadata(class)[Assistants], - DeleteThreadResponse(class)[Assistants], - Id(string)[Assistants], - Deleted(bool)[Assistants], - DeleteThreadResponseObject(enum)[Assistants], - ListThreadsResponse(class)[], - Object(string)[], - Data(array)[], - ThreadObject(ref)[], - FirstId(string)[], - LastId(string)[], - HasMore(bool)[], - MessageObject(class)[Assistants], - Id(string)[Assistants], - MessageObjectObject(enum)[Assistants], - CreatedAt(int)[Assistants], - ThreadId(string)[Assistants], - MessageObjectStatus(enum)[Assistants], - MessageObjectIncompleteDetails(class)[Assistants], - MessageObjectIncompleteDetailsReason(enum)[Assistants], - CompletedAt(int)[Assistants], - IncompleteAt(int)[Assistants], - MessageObjectRole(enum)[Assistants], - Content(array)[Assistants], - ContentItem(oneOf)[Assistants], - MessageContentImageFileObject(ref)[Assistants], - MessageContentImageUrlObject(ref)[Assistants], - MessageContentTextObject(ref)[Assistants], - MessageContentRefusalObject(ref)[Assistants], - MessageObjectContentItemDiscriminator(class)[Assistants], - MessageObjectContentItemDiscriminatorType(enum)[Assistants], - AssistantId(string)[Assistants], - RunId(string)[Assistants], - Attachments(array)[Assistants], - MessageObjectAttachment(class)[Assistants], - FileId(string)[Assistants], - Tools(array)[Assistants], - ToolsItem7(oneOf)[Assistants], - AssistantToolsCode(ref)[Assistants], - AssistantToolsFileSearchTypeOnly(ref)[Assistants], - MessageObjectAttachmentToolDiscriminator(class)[Assistants], - MessageObjectAttachmentToolDiscriminatorType(enum)[Assistants], - MessageObjectMetadata(class)[Assistants], - MessageContentTextObject(class)[Assistants], - MessageContentTextObjectType(enum)[Assistants], - MessageContentTextObjectText(class)[Assistants], - Value(string)[Assistants], - Annotations(array)[Assistants], - AnnotationsItem(oneOf)[Assistants], - MessageContentTextAnnotationsFileCitationObject(ref)[Assistants], - MessageContentTextAnnotationsFilePathObject(ref)[Assistants], - MessageContentTextObjectTextAnnotationDiscriminator(class)[Assistants], - MessageContentTextObjectTextAnnotationDiscriminatorType(enum)[Assistants], - MessageContentTextAnnotationsFileCitationObject(class)[Assistants], - MessageContentTextAnnotationsFileCitationObjectType(enum)[Assistants], - Text(string)[Assistants], - MessageContentTextAnnotationsFileCitationObjectFileCitation(class)[Assistants], - FileId(string)[Assistants], - StartIndex(int)[Assistants], - EndIndex(int)[Assistants], - MessageContentTextAnnotationsFilePathObject(class)[Assistants], - MessageContentTextAnnotationsFilePathObjectType(enum)[Assistants], - Text(string)[Assistants], - MessageContentTextAnnotationsFilePathObjectFilePath(class)[Assistants], - FileId(string)[Assistants], - 
StartIndex(int)[Assistants], - EndIndex(int)[Assistants], - MessageContentRefusalObject(class)[Assistants], - MessageContentRefusalObjectType(enum)[Assistants], - Refusal(string)[Assistants], - MessageDeltaObject(class)[], - Id(string)[], - MessageDeltaObjectObject(enum)[], - MessageDeltaObjectDelta(class)[], - MessageDeltaObjectDeltaRole(enum)[], - Content(array)[], - ContentItem2(oneOf)[], - MessageDeltaContentImageFileObject(ref)[], - MessageDeltaContentTextObject(ref)[], - MessageDeltaContentRefusalObject(ref)[], - MessageDeltaContentImageUrlObject(ref)[], - MessageDeltaObjectDeltaContentItemDiscriminator(class)[], - MessageDeltaObjectDeltaContentItemDiscriminatorType(enum)[], - MessageDeltaContentImageFileObject(class)[], - Index(int)[], - MessageDeltaContentImageFileObjectType(enum)[], - MessageDeltaContentImageFileObjectImageFile(class)[], - FileId(string)[], - MessageDeltaContentImageFileObjectImageFileDetail(enum)[], - MessageDeltaContentTextObject(class)[], - Index(int)[], - MessageDeltaContentTextObjectType(enum)[], - MessageDeltaContentTextObjectText(class)[], - Value(string)[], - Annotations(array)[], - AnnotationsItem2(oneOf)[], - MessageDeltaContentTextAnnotationsFileCitationObject(ref)[], - MessageDeltaContentTextAnnotationsFilePathObject(ref)[], - MessageDeltaContentTextObjectTextAnnotationDiscriminator(class)[], - MessageDeltaContentTextObjectTextAnnotationDiscriminatorType(enum)[], - MessageDeltaContentTextAnnotationsFileCitationObject(class)[], - Index(int)[], - MessageDeltaContentTextAnnotationsFileCitationObjectType(enum)[], - Text(string)[], - MessageDeltaContentTextAnnotationsFileCitationObjectFileCitation(class)[], - FileId(string)[], - Quote(string)[], - StartIndex(int)[], - EndIndex(int)[], - MessageDeltaContentTextAnnotationsFilePathObject(class)[], - Index(int)[], - MessageDeltaContentTextAnnotationsFilePathObjectType(enum)[], - Text(string)[], - MessageDeltaContentTextAnnotationsFilePathObjectFilePath(class)[], - FileId(string)[], - StartIndex(int)[], - EndIndex(int)[], - MessageDeltaContentRefusalObject(class)[], - Index(int)[], - MessageDeltaContentRefusalObjectType(enum)[], - Refusal(string)[], - MessageDeltaContentImageUrlObject(class)[], - Index(int)[], - MessageDeltaContentImageUrlObjectType(enum)[], - MessageDeltaContentImageUrlObjectImageUrl(class)[], - Url(string)[], - MessageDeltaContentImageUrlObjectImageUrlDetail(enum)[], - ModifyMessageRequest(class)[Assistants], - ModifyMessageRequestMetadata(class)[Assistants], - DeleteMessageResponse(class)[Assistants], - Id(string)[Assistants], - Deleted(bool)[Assistants], - DeleteMessageResponseObject(enum)[Assistants], - ListMessagesResponse(class)[Assistants], - Object(string)[Assistants], - Data(array)[Assistants], - MessageObject(ref)[Assistants], - FirstId(string)[Assistants], - LastId(string)[Assistants], - HasMore(bool)[Assistants], - RunStepObject(class)[Assistants], - Id(string)[Assistants], - RunStepObjectObject(enum)[Assistants], - CreatedAt(int)[Assistants], - AssistantId(string)[Assistants], - ThreadId(string)[Assistants], - RunId(string)[Assistants], - RunStepObjectType(enum)[Assistants], - RunStepObjectStatus(enum)[Assistants], - RunStepObjectStepDetails(class)[Assistants], - RunStepDetailsMessageCreationObject(ref)[Assistants], - RunStepDetailsToolCallsObject(ref)[Assistants], - RunStepObjectStepDetailsDiscriminator(class)[Assistants], - RunStepObjectStepDetailsDiscriminatorType(enum)[Assistants], - RunStepObjectLastError(class)[Assistants], - RunStepObjectLastErrorCode(enum)[Assistants], - 
Message(string)[Assistants], - ExpiredAt(int)[Assistants], - CancelledAt(int)[Assistants], - FailedAt(int)[Assistants], - CompletedAt(int)[Assistants], - RunStepObjectMetadata(class)[Assistants], - RunStepCompletionUsage(ref)[Assistants], - RunStepDetailsMessageCreationObject(class)[Assistants], - RunStepDetailsMessageCreationObjectType(enum)[Assistants], - RunStepDetailsMessageCreationObjectMessageCreation(class)[Assistants], - MessageId(string)[Assistants], - RunStepDetailsToolCallsObject(class)[Assistants], - RunStepDetailsToolCallsObjectType(enum)[Assistants], - ToolCalls(array)[Assistants], - ToolCallsItem(oneOf)[Assistants], - RunStepDetailsToolCallsCodeObject(ref)[Assistants], - RunStepDetailsToolCallsFileSearchObject(ref)[Assistants], - RunStepDetailsToolCallsFunctionObject(ref)[Assistants], - RunStepDetailsToolCallsObjectToolCallDiscriminator(class)[Assistants], - RunStepDetailsToolCallsObjectToolCallDiscriminatorType(enum)[Assistants], - RunStepDetailsToolCallsCodeObject(class)[Assistants], - Id(string)[Assistants], - RunStepDetailsToolCallsCodeObjectType(enum)[Assistants], - RunStepDetailsToolCallsCodeObjectCodeInterpreter(class)[Assistants], - Input(string)[Assistants], - Outputs(array)[Assistants], - RunStepDetailsToolCallsCodeObjectCodeInterpreterOutput(class)[Assistants], - RunStepDetailsToolCallsCodeOutputLogsObject(ref)[Assistants], - RunStepDetailsToolCallsCodeOutputImageObject(ref)[Assistants], - RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator(class)[Assistants], - RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType(enum)[Assistants], - RunStepDetailsToolCallsCodeOutputLogsObject(class)[Assistants], - RunStepDetailsToolCallsCodeOutputLogsObjectType(enum)[Assistants], - Logs(string)[Assistants], - RunStepDetailsToolCallsCodeOutputImageObject(class)[Assistants], - RunStepDetailsToolCallsCodeOutputImageObjectType(enum)[Assistants], - RunStepDetailsToolCallsCodeOutputImageObjectImage(class)[Assistants], - FileId(string)[Assistants], - RunStepDetailsToolCallsFileSearchObject(class)[Assistants], - Id(string)[Assistants], - RunStepDetailsToolCallsFileSearchObjectType(enum)[Assistants], - RunStepDetailsToolCallsFileSearchObjectFileSearch(class)[Assistants], - RunStepDetailsToolCallsFunctionObject(class)[Assistants], - Id(string)[Assistants], - RunStepDetailsToolCallsFunctionObjectType(enum)[Assistants], - RunStepDetailsToolCallsFunctionObjectFunction(class)[Assistants], - Name(string)[Assistants], - Arguments(string)[Assistants], - Output(string)[Assistants], - RunStepDeltaObject(class)[], - Id(string)[], - RunStepDeltaObjectObject(enum)[], - RunStepDeltaObjectDelta(class)[], - RunStepDeltaObjectDeltaStepDetails(class)[], - RunStepDeltaStepDetailsMessageCreationObject(ref)[], - RunStepDeltaStepDetailsToolCallsObject(ref)[], - RunStepDeltaObjectDeltaStepDetailsDiscriminator(class)[], - RunStepDeltaObjectDeltaStepDetailsDiscriminatorType(enum)[], - RunStepDeltaStepDetailsMessageCreationObject(class)[], - RunStepDeltaStepDetailsMessageCreationObjectType(enum)[], - RunStepDeltaStepDetailsMessageCreationObjectMessageCreation(class)[], - MessageId(string)[], - RunStepDeltaStepDetailsToolCallsObject(class)[], - RunStepDeltaStepDetailsToolCallsObjectType(enum)[], - ToolCalls(array)[], - ToolCallsItem2(oneOf)[], - RunStepDeltaStepDetailsToolCallsCodeObject(ref)[], - RunStepDeltaStepDetailsToolCallsFileSearchObject(ref)[], - RunStepDeltaStepDetailsToolCallsFunctionObject(ref)[], - RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator(class)[], 
- RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType(enum)[], - RunStepDeltaStepDetailsToolCallsCodeObject(class)[], - Index(int)[], - Id(string)[], - RunStepDeltaStepDetailsToolCallsCodeObjectType(enum)[], - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter(class)[], - Input(string)[], - Outputs(array)[], - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutput(class)[], - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject(ref)[], - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject(ref)[], - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator(class)[], - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType(enum)[], - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject(class)[], - Index(int)[], - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectType(enum)[], - Logs(string)[], - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject(class)[], - Index(int)[], - RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectType(enum)[], - RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImage(class)[], - FileId(string)[], - RunStepDeltaStepDetailsToolCallsFileSearchObject(class)[], - Index(int)[], - Id(string)[], - RunStepDeltaStepDetailsToolCallsFileSearchObjectType(enum)[], - RunStepDeltaStepDetailsToolCallsFileSearchObjectFileSearch(class)[], - RunStepDeltaStepDetailsToolCallsFunctionObject(class)[], + ChatCompletionFunctionCallOption(class)[Chat], + Name(string)[Chat], + ChatCompletionFunctions(class)[Chat], + Description(string)[Chat], + Name(string)[Chat], + FunctionParameters(ref)[Chat], + ChatCompletionMessageToolCall(class)[Chat], + Id(string)[Chat], + ChatCompletionMessageToolCallType(enum)[Chat], + ChatCompletionMessageToolCallFunction(class)[Chat], + Name(string)[Chat], + Arguments(string)[Chat], + ChatCompletionMessageToolCallChunk(class)[], Index(int)[], Id(string)[], - RunStepDeltaStepDetailsToolCallsFunctionObjectType(enum)[], - RunStepDeltaStepDetailsToolCallsFunctionObjectFunction(class)[], + ChatCompletionMessageToolCallChunkType(enum)[], + ChatCompletionMessageToolCallChunkFunction(class)[], Name(string)[], Arguments(string)[], - Output(string)[], - ListRunStepsResponse(class)[Assistants], - Object(string)[Assistants], - Data(array)[Assistants], - RunStepObject(ref)[Assistants], - FirstId(string)[Assistants], - LastId(string)[Assistants], - HasMore(bool)[Assistants], - VectorStoreExpirationAfter(class)[Vector Stores], - VectorStoreExpirationAfterAnchor(enum)[Vector Stores], - Days(int)[Vector Stores], - VectorStoreObject(class)[Vector Stores], - Id(string)[Vector Stores], - VectorStoreObjectObject(enum)[Vector Stores], - CreatedAt(int)[Vector Stores], - Name(string)[Vector Stores], - UsageBytes(int)[Vector Stores], - VectorStoreObjectFileCounts(class)[Vector Stores], - InProgress(int)[Vector Stores], - Completed(int)[Vector Stores], - Failed(int)[Vector Stores], - Cancelled(int)[Vector Stores], - Total(int)[Vector Stores], - VectorStoreObjectStatus(enum)[Vector Stores], - VectorStoreExpirationAfter(ref)[Vector Stores], - ExpiresAt(int)[Vector Stores], - LastActiveAt(int)[Vector Stores], - VectorStoreObjectMetadata(class)[Vector Stores], - CreateVectorStoreRequest(class)[Vector Stores], - FileIds(array)[Vector Stores], - FileIdsItem(string)[Vector Stores], - Name(string)[Vector Stores], - VectorStoreExpirationAfter(ref)[Vector Stores], - CreateVectorStoreRequestChunkingStrategy(class)[Vector Stores], - AutoChunkingStrategyRequestParam(ref)[Vector Stores], - 
StaticChunkingStrategyRequestParam(ref)[Vector Stores], - CreateVectorStoreRequestChunkingStrategyDiscriminator(class)[Vector Stores], - CreateVectorStoreRequestChunkingStrategyDiscriminatorType(enum)[Vector Stores], - CreateVectorStoreRequestMetadata(class)[Vector Stores], - AutoChunkingStrategyRequestParam(class)[Vector Stores], - AutoChunkingStrategyRequestParamType(enum)[Vector Stores], - StaticChunkingStrategyRequestParam(class)[Vector Stores], - StaticChunkingStrategyRequestParamType(enum)[Vector Stores], - StaticChunkingStrategy(ref)[Vector Stores], - StaticChunkingStrategy(class)[Vector Stores], - MaxChunkSizeTokens(int)[Vector Stores], - ChunkOverlapTokens(int)[Vector Stores], - UpdateVectorStoreRequest(class)[Vector Stores], - Name(string)[Vector Stores], - VectorStoreExpirationAfter(ref)[Vector Stores], - UpdateVectorStoreRequestMetadata(class)[Vector Stores], - ListVectorStoresResponse(class)[Vector Stores], - Object(string)[Vector Stores], - Data(array)[Vector Stores], - VectorStoreObject(ref)[Vector Stores], - FirstId(string)[Vector Stores], - LastId(string)[Vector Stores], - HasMore(bool)[Vector Stores], - DeleteVectorStoreResponse(class)[Vector Stores], - Id(string)[Vector Stores], - Deleted(bool)[Vector Stores], - DeleteVectorStoreResponseObject(enum)[Vector Stores], - VectorStoreFileObject(class)[Vector Stores], - Id(string)[Vector Stores], - VectorStoreFileObjectObject(enum)[Vector Stores], - UsageBytes(int)[Vector Stores], - CreatedAt(int)[Vector Stores], - VectorStoreId(string)[Vector Stores], - VectorStoreFileObjectStatus(enum)[Vector Stores], - VectorStoreFileObjectLastError(class)[Vector Stores], - VectorStoreFileObjectLastErrorCode(enum)[Vector Stores], - Message(string)[Vector Stores], - VectorStoreFileObjectChunkingStrategy(class)[Vector Stores], - StaticChunkingStrategyResponseParam(ref)[Vector Stores], - OtherChunkingStrategyResponseParam(ref)[Vector Stores], - VectorStoreFileObjectChunkingStrategyDiscriminator(class)[Vector Stores], - VectorStoreFileObjectChunkingStrategyDiscriminatorType(enum)[Vector Stores], - StaticChunkingStrategyResponseParam(class)[Vector Stores], - StaticChunkingStrategyResponseParamType(enum)[Vector Stores], - StaticChunkingStrategy(ref)[Vector Stores], - OtherChunkingStrategyResponseParam(class)[Vector Stores], - OtherChunkingStrategyResponseParamType(enum)[Vector Stores], - ChunkingStrategyRequestParam(class)[Vector Stores], - AutoChunkingStrategyRequestParam(ref)[Vector Stores], - StaticChunkingStrategyRequestParam(ref)[Vector Stores], - ChunkingStrategyRequestParamDiscriminator(class)[Vector Stores], - ChunkingStrategyRequestParamDiscriminatorType(enum)[Vector Stores], - CreateVectorStoreFileRequest(class)[Vector Stores], - FileId(string)[Vector Stores], - ChunkingStrategyRequestParam(ref)[Vector Stores], - ListVectorStoreFilesResponse(class)[Vector Stores], - Object(string)[Vector Stores], - Data(array)[Vector Stores], - VectorStoreFileObject(ref)[Vector Stores], - FirstId(string)[Vector Stores], - LastId(string)[Vector Stores], - HasMore(bool)[Vector Stores], - DeleteVectorStoreFileResponse(class)[Vector Stores], - Id(string)[Vector Stores], - Deleted(bool)[Vector Stores], - DeleteVectorStoreFileResponseObject(enum)[Vector Stores], - VectorStoreFileBatchObject(class)[Vector Stores], - Id(string)[Vector Stores], - VectorStoreFileBatchObjectObject(enum)[Vector Stores], - CreatedAt(int)[Vector Stores], - VectorStoreId(string)[Vector Stores], - VectorStoreFileBatchObjectStatus(enum)[Vector Stores], - 
VectorStoreFileBatchObjectFileCounts(class)[Vector Stores], - InProgress(int)[Vector Stores], - Completed(int)[Vector Stores], - Failed(int)[Vector Stores], - Cancelled(int)[Vector Stores], - Total(int)[Vector Stores], - CreateVectorStoreFileBatchRequest(class)[Vector Stores], - FileIds(array)[Vector Stores], - FileIdsItem(string)[Vector Stores], - ChunkingStrategyRequestParam(ref)[Vector Stores], - AssistantStreamEvent(oneOf)[], - ErrorEvent(ref)[], - DoneEvent(ref)[], - AssistantStreamEventVariant3(class)[], - AssistantStreamEventVariant3Event(enum)[], - ThreadObject(ref)[], - AssistantStreamEventVariant4(class)[], - AssistantStreamEventVariant4Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant5(class)[], - AssistantStreamEventVariant5Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant6(class)[], - AssistantStreamEventVariant6Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant7(class)[], - AssistantStreamEventVariant7Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant8(class)[], - AssistantStreamEventVariant8Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant9(class)[], - AssistantStreamEventVariant9Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant10(class)[], - AssistantStreamEventVariant10Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant11(class)[], - AssistantStreamEventVariant11Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant12(class)[], - AssistantStreamEventVariant12Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant13(class)[], - AssistantStreamEventVariant13Event(enum)[], - RunObject(ref)[], - AssistantStreamEventVariant14(class)[], - AssistantStreamEventVariant14Event(enum)[], - RunStepObject(ref)[], - AssistantStreamEventVariant15(class)[], - AssistantStreamEventVariant15Event(enum)[], - RunStepObject(ref)[], - AssistantStreamEventVariant16(class)[], - AssistantStreamEventVariant16Event(enum)[], - RunStepDeltaObject(ref)[], - AssistantStreamEventVariant17(class)[], - AssistantStreamEventVariant17Event(enum)[], - RunStepObject(ref)[], - AssistantStreamEventVariant18(class)[], - AssistantStreamEventVariant18Event(enum)[], - RunStepObject(ref)[], - AssistantStreamEventVariant19(class)[], - AssistantStreamEventVariant19Event(enum)[], - RunStepObject(ref)[], - AssistantStreamEventVariant20(class)[], - AssistantStreamEventVariant20Event(enum)[], - RunStepObject(ref)[], - AssistantStreamEventVariant21(class)[], - AssistantStreamEventVariant21Event(enum)[], - MessageObject(ref)[], - AssistantStreamEventVariant22(class)[], - AssistantStreamEventVariant22Event(enum)[], - MessageObject(ref)[], - AssistantStreamEventVariant23(class)[], - AssistantStreamEventVariant23Event(enum)[], - MessageDeltaObject(ref)[], - AssistantStreamEventVariant24(class)[], - AssistantStreamEventVariant24Event(enum)[], - MessageObject(ref)[], - AssistantStreamEventVariant25(class)[], - AssistantStreamEventVariant25Event(enum)[], - MessageObject(ref)[], - AssistantStreamEventDiscriminator(class)[], - AssistantStreamEventDiscriminatorEvent(enum)[], - ErrorEvent(class)[], - ErrorEventEvent(enum)[], - Error(ref)[], - DoneEvent(class)[], - DoneEventEvent(enum)[], - DoneEventData(enum)[], - Batch(class)[Batch], - Id(string)[Batch], - BatchObject(enum)[Batch], - Endpoint(string)[Batch], - BatchErrors(class)[Batch], - Object(string)[Batch], - Data(array)[Batch], - BatchErrorsDataItem(class)[Batch], - Code(string)[Batch], - Message(string)[Batch], - Param(string)[Batch], - Line(int)[Batch], - 
InputFileId(string)[Batch], - CompletionWindow(string)[Batch], - BatchStatus(enum)[Batch], - OutputFileId(string)[Batch], - ErrorFileId(string)[Batch], - CreatedAt(int)[Batch], - InProgressAt(int)[Batch], - ExpiresAt(int)[Batch], - FinalizingAt(int)[Batch], - CompletedAt(int)[Batch], - FailedAt(int)[Batch], - ExpiredAt(int)[Batch], - CancellingAt(int)[Batch], - CancelledAt(int)[Batch], - BatchRequestCounts(class)[Batch], - Total(int)[Batch], - Completed(int)[Batch], - Failed(int)[Batch], - BatchMetadata(class)[Batch], - BatchRequestInput(class)[], - CustomId(string)[], - BatchRequestInputMethod(enum)[], - Url(string)[], - BatchRequestOutput(class)[], + ChatCompletionMessageToolCalls(array)[Chat], + ChatCompletionMessageToolCall(ref)[Chat], + ChatCompletionModalities(array)[Chat], + ChatCompletionModalitie(enum)[Chat], + ChatCompletionNamedToolChoice(class)[Chat], + ChatCompletionNamedToolChoiceType(enum)[Chat], + ChatCompletionNamedToolChoiceFunction(class)[Chat], + Name(string)[Chat], + ChatCompletionRequestAssistantMessage(class)[Chat], + Content(oneOf)[Chat], + ContentVariant1(string)[Chat], + ContentVariant2(array)[Chat], + ChatCompletionRequestAssistantMessageContentPart(ref)[Chat], + Refusal(string)[Chat], + ChatCompletionRequestAssistantMessageRole(enum)[Chat], + Name(string)[Chat], + ChatCompletionRequestAssistantMessageAudio(class)[Chat], + Id(string)[Chat], + ChatCompletionMessageToolCalls(ref)[Chat], + ChatCompletionRequestAssistantMessageFunctionCall(class)[Chat], + Arguments(string)[Chat], + Name(string)[Chat], + ChatCompletionRequestAssistantMessageContentPart(oneOf)[Chat], + ChatCompletionRequestMessageContentPartText(ref)[Chat], + ChatCompletionRequestMessageContentPartRefusal(ref)[Chat], + ChatCompletionRequestAssistantMessageContentPartDiscriminator(class)[Chat], + ChatCompletionRequestAssistantMessageContentPartDiscriminatorType(enum)[Chat], + ChatCompletionRequestMessageContentPartText(class)[Chat], + ChatCompletionRequestMessageContentPartTextType(enum)[Chat], + Text(string)[Chat], + ChatCompletionRequestMessageContentPartRefusal(class)[Chat], + ChatCompletionRequestMessageContentPartRefusalType(enum)[Chat], + Refusal(string)[Chat], + ChatCompletionRequestFunctionMessage(class)[Chat], + ChatCompletionRequestFunctionMessageRole(enum)[Chat], + Content(string)[Chat], + Name(string)[Chat], + ChatCompletionRequestMessage(oneOf)[Chat], + ChatCompletionRequestSystemMessage(ref)[Chat], + ChatCompletionRequestUserMessage(ref)[Chat], + ChatCompletionRequestAssistantMessage(ref)[Chat], + ChatCompletionRequestToolMessage(ref)[Chat], + ChatCompletionRequestFunctionMessage(ref)[Chat], + ChatCompletionRequestMessageDiscriminator(class)[Chat], + ChatCompletionRequestMessageDiscriminatorRole(enum)[Chat], + ChatCompletionRequestSystemMessage(class)[Chat], + Content2(oneOf)[Chat], + ContentVariant1(string)[Chat], + ContentVariant2(array)[Chat], + ChatCompletionRequestSystemMessageContentPart(ref)[Chat], + ChatCompletionRequestSystemMessageRole(enum)[Chat], + Name(string)[Chat], + ChatCompletionRequestSystemMessageContentPart(oneOf)[Chat], + ChatCompletionRequestMessageContentPartText(ref)[Chat], + ChatCompletionRequestUserMessage(class)[Chat], + Content4(oneOf)[Chat], + ContentVariant1(string)[Chat], + ContentVariant2(array)[Chat], + ChatCompletionRequestUserMessageContentPart(ref)[Chat], + ChatCompletionRequestUserMessageRole(enum)[Chat], + Name(string)[Chat], + ChatCompletionRequestUserMessageContentPart(oneOf)[Chat], + ChatCompletionRequestMessageContentPartText(ref)[Chat], + 
ChatCompletionRequestMessageContentPartImage(ref)[Chat], + ChatCompletionRequestMessageContentPartAudio(ref)[Chat], + ChatCompletionRequestUserMessageContentPartDiscriminator(class)[Chat], + ChatCompletionRequestUserMessageContentPartDiscriminatorType(enum)[Chat], + ChatCompletionRequestMessageContentPartImage(class)[Chat], + ChatCompletionRequestMessageContentPartImageType(enum)[Chat], + ChatCompletionRequestMessageContentPartImageImageUrl(class)[Chat], + Url(Uri)[Chat], + ChatCompletionRequestMessageContentPartImageImageUrlDetail(enum)[Chat], + ChatCompletionRequestMessageContentPartAudio(class)[Chat], + ChatCompletionRequestMessageContentPartAudioType(enum)[Chat], + ChatCompletionRequestMessageContentPartAudioInputAudio(class)[Chat], + Data(string)[Chat], + ChatCompletionRequestMessageContentPartAudioInputAudioFormat(enum)[Chat], + ChatCompletionRequestToolMessage(class)[Chat], + ChatCompletionRequestToolMessageRole(enum)[Chat], + Content3(oneOf)[Chat], + ContentVariant1(string)[Chat], + ContentVariant2(array)[Chat], + ChatCompletionRequestToolMessageContentPart(ref)[Chat], + ToolCallId(string)[Chat], + ChatCompletionRequestToolMessageContentPart(oneOf)[Chat], + ChatCompletionRequestMessageContentPartText(ref)[Chat], + ChatCompletionResponseMessage(class)[Chat], + Content(string)[Chat], + Refusal(string)[Chat], + ChatCompletionMessageToolCalls(ref)[Chat], + ChatCompletionResponseMessageRole(enum)[Chat], + ChatCompletionResponseMessageFunctionCall(class)[Chat], + Arguments(string)[Chat], + Name(string)[Chat], + ChatCompletionResponseMessageAudio(class)[Chat], + Id(string)[Chat], + ExpiresAt(int)[Chat], + Data(string)[Chat], + Transcript(string)[Chat], + ChatCompletionRole(enum)[], + ChatCompletionStreamOptions(class)[Chat, Completions], + IncludeUsage(bool)[Chat, Completions], + ChatCompletionStreamResponseDelta(class)[], + Content(string)[], + ChatCompletionStreamResponseDeltaFunctionCall(class)[], + Arguments(string)[], + Name(string)[], + ToolCalls(array)[], + ChatCompletionMessageToolCallChunk(ref)[], + ChatCompletionStreamResponseDeltaRole(enum)[], + Refusal(string)[], + ChatCompletionTokenLogprob(class)[Chat], + Token(string)[Chat], + Logprob(double)[Chat], + Bytes(array)[Chat], + BytesItem(int)[Chat], + TopLogprobs(array)[Chat], + ChatCompletionTokenLogprobTopLogprob(class)[Chat], + Token(string)[Chat], + Logprob(double)[Chat], + Bytes(array)[Chat], + BytesItem(int)[Chat], + ChatCompletionTool(class)[Chat], + ChatCompletionToolType(enum)[Chat], + FunctionObject(ref)[Chat], + ChatCompletionToolChoiceOption(oneOf)[Chat], + ChatCompletionToolChoiceOptionEnum(enum)[Chat], + ChatCompletionNamedToolChoice(ref)[Chat], + ChunkingStrategyRequestParam(class)[Vector stores], + AutoChunkingStrategyRequestParam(ref)[Vector stores], + StaticChunkingStrategyRequestParam(ref)[Vector stores], + ChunkingStrategyRequestParamDiscriminator(class)[Vector stores], + ChunkingStrategyRequestParamDiscriminatorType(enum)[Vector stores], + StaticChunkingStrategyRequestParam(class)[Vector stores], + StaticChunkingStrategyRequestParamType(enum)[Vector stores], + StaticChunkingStrategy(ref)[Vector stores], + StaticChunkingStrategy(class)[Vector stores], + MaxChunkSizeTokens(int)[Vector stores], + ChunkOverlapTokens(int)[Vector stores], + CompleteUploadRequest(class)[Uploads], + PartIds(array)[Uploads], + PartIdsItem(string)[Uploads], + Md5(string)[Uploads], + CompletionUsage(class)[Chat, Completions], + CompletionTokens(int)[Chat, Completions], + PromptTokens(int)[Chat, Completions], + TotalTokens(int)[Chat, 
Completions], + CompletionUsageCompletionTokensDetails(class)[Chat, Completions], + AcceptedPredictionTokens(int)[Chat, Completions], + AudioTokens(int)[Chat, Completions], + ReasoningTokens(int)[Chat, Completions], + RejectedPredictionTokens(int)[Chat, Completions], + CompletionUsagePromptTokensDetails(class)[Chat, Completions], + AudioTokens(int)[Chat, Completions], + CachedTokens(int)[Chat, Completions], + CostsResult(class)[Usage], + CostsResultObject(enum)[Usage], + CostsResultAmount(class)[Usage], + Value(double)[Usage], + Currency(string)[Usage], + LineItem(string)[Usage], + ProjectId(string)[Usage], + CreateAssistantRequest(class)[Assistants], + Model(anyOf)[Assistants], + ModelVariant1(string)[Assistants], + CreateAssistantRequestModel(enum)[Assistants], + Name(string)[Assistants], + Description(string)[Assistants], + Instructions(string)[Assistants], + Tools(array)[Assistants], + ToolsItem2(oneOf)[Assistants], + AssistantToolsCode(ref)[Assistants], + AssistantToolsFileSearch(ref)[Assistants], + AssistantToolsFunction(ref)[Assistants], + CreateAssistantRequestToolDiscriminator(class)[Assistants], + CreateAssistantRequestToolDiscriminatorType(enum)[Assistants], + CreateAssistantRequestToolResources(class)[Assistants], + CreateAssistantRequestToolResourcesCodeInterpreter(class)[Assistants], + FileIds(array)[Assistants], + FileIdsItem(string)[Assistants], + CreateAssistantRequestToolResourcesFileSearch(class)[Assistants], + VectorStoreIds(array)[Assistants], + VectorStoreIdsItem(string)[Assistants], + VectorStores(array)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStore(class)[Assistants], + FileIds(array)[Assistants], + FileIdsItem(string)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategy(class)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1(class)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type(enum)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2(class)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type(enum)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static(class)[Assistants], + MaxChunkSizeTokens(int)[Assistants], + ChunkOverlapTokens(int)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator(class)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType(enum)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata(class)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVariant1(class)[Assistants], + CreateAssistantRequestToolResourcesFileSearchVariant2(class)[Assistants], + CreateAssistantRequestMetadata(class)[Assistants], + Temperature(double)[Assistants], + TopP(double)[Assistants], + AssistantsApiResponseFormatOption(ref)[Assistants], + CreateChatCompletionFunctionResponse(class)[], Id(string)[], - CustomId(string)[], - BatchRequestOutputResponse(class)[], - StatusCode(int)[], - RequestId(string)[], - BatchRequestOutputResponseBody(class)[], - BatchRequestOutputError(class)[], - Code(string)[], - Message(string)[], - ListBatchesResponse(class)[Batch], - Data(array)[Batch], - Batch(ref)[Batch], - FirstId(string)[Batch], - LastId(string)[Batch], - HasMore(bool)[Batch], - ListBatchesResponseObject(enum)[Batch], - 
AuditLogActorServiceAccount(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogActorUser(class)[Audit Logs], - Id(string)[Audit Logs], - Email(string)[Audit Logs], - AuditLogActorApiKey(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogActorApiKeyType(enum)[Audit Logs], - AuditLogActorUser(ref)[Audit Logs], - AuditLogActorServiceAccount(ref)[Audit Logs], - AuditLogActorSession(class)[Audit Logs], - AuditLogActorUser(ref)[Audit Logs], - IpAddress(string)[Audit Logs], - AuditLogActor(class)[Audit Logs], - AuditLogActorType(enum)[Audit Logs], - AuditLogActorSession(ref)[Audit Logs], - AuditLogActorApiKey(ref)[Audit Logs], - AuditLogEventType(enum)[Audit Logs], - AuditLog(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogEventType(ref)[Audit Logs], - EffectiveAt(int)[Audit Logs], - AuditLogProject(class)[Audit Logs], - Id(string)[Audit Logs], - Name(string)[Audit Logs], - AuditLogActor(ref)[Audit Logs], - AuditLogApiKeyCreated(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogApiKeyCreatedData(class)[Audit Logs], - Scopes(array)[Audit Logs], - ScopesItem(string)[Audit Logs], - AuditLogApiKeyUpdated(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogApiKeyUpdatedChangesRequested(class)[Audit Logs], - Scopes(array)[Audit Logs], - ScopesItem(string)[Audit Logs], - AuditLogApiKeyDeleted(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogInviteSent(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogInviteSentData(class)[Audit Logs], - Email(string)[Audit Logs], - Role(string)[Audit Logs], - AuditLogInviteAccepted(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogInviteDeleted(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogLoginFailed(class)[Audit Logs], - ErrorCode(string)[Audit Logs], - ErrorMessage(string)[Audit Logs], - AuditLogLogoutFailed(class)[Audit Logs], - ErrorCode(string)[Audit Logs], - ErrorMessage(string)[Audit Logs], - AuditLogOrganizationUpdated(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogOrganizationUpdatedChangesRequested(class)[Audit Logs], - Title(string)[Audit Logs], - Description(string)[Audit Logs], - Name(string)[Audit Logs], - AuditLogOrganizationUpdatedChangesRequestedSettings(class)[Audit Logs], - ThreadsUiVisibility(string)[Audit Logs], - UsageDashboardVisibility(string)[Audit Logs], - AuditLogProjectCreated(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogProjectCreatedData(class)[Audit Logs], - Name(string)[Audit Logs], - Title(string)[Audit Logs], - AuditLogProjectUpdated(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogProjectUpdatedChangesRequested(class)[Audit Logs], - Title(string)[Audit Logs], - AuditLogProjectArchived(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogServiceAccountCreated(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogServiceAccountCreatedData(class)[Audit Logs], - Role(string)[Audit Logs], - AuditLogServiceAccountUpdated(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogServiceAccountUpdatedChangesRequested(class)[Audit Logs], - Role(string)[Audit Logs], - AuditLogServiceAccountDeleted(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogUserAdded(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogUserAddedData(class)[Audit Logs], - Role(string)[Audit Logs], - AuditLogUserUpdated(class)[Audit Logs], - Id(string)[Audit Logs], - AuditLogUserUpdatedChangesRequested(class)[Audit Logs], - Role(string)[Audit Logs], - AuditLogUserDeleted(class)[Audit Logs], - Id(string)[Audit Logs], - ListAuditLogsResponse(class)[Audit Logs], - 
ListAuditLogsResponseObject(enum)[Audit Logs], - Data(array)[Audit Logs], - AuditLog(ref)[Audit Logs], - FirstId(string)[Audit Logs], - LastId(string)[Audit Logs], - HasMore(bool)[Audit Logs], + Choices(array)[], + CreateChatCompletionFunctionResponseChoice(class)[], + CreateChatCompletionFunctionResponseChoiceFinishReason(enum)[], + Index(int)[], + ChatCompletionResponseMessage(ref)[], + Created(int)[], + Model(string)[], + SystemFingerprint(string)[], + CreateChatCompletionFunctionResponseObject(enum)[], + CompletionUsage(ref)[], + CreateChatCompletionImageResponse(class)[], + CreateChatCompletionRequest(class)[Chat], + Messages(array)[Chat], + ChatCompletionRequestMessage(ref)[Chat], + Model2(anyOf)[Chat], + ModelVariant1(string)[Chat], + CreateChatCompletionRequestModel(enum)[Chat], + Store(bool)[Chat], + CreateChatCompletionRequestMetadata(class)[Chat], + Metadata(string)[Chat], + FrequencyPenalty(double)[Chat], + CreateChatCompletionRequestLogitBias(class)[Chat], + LogitBias(int)[Chat], + Logprobs(bool)[Chat], + TopLogprobs(int)[Chat], + MaxTokens(int)[Chat], + MaxCompletionTokens(int)[Chat], + N(int)[Chat], + ChatCompletionModalities(ref)[Chat], + Prediction_AllOf1Wrapped(oneOf)[Chat], + PredictionContent(ref)[Chat], + CreateChatCompletionRequestAudio(class)[Chat], + CreateChatCompletionRequestAudioVoice(enum)[Chat], + CreateChatCompletionRequestAudioFormat(enum)[Chat], + PresencePenalty(double)[Chat], + ResponseFormat(oneOf)[Chat], + ResponseFormatText(ref)[Chat], + ResponseFormatJsonObject(ref)[Chat], + ResponseFormatJsonSchema(ref)[Chat], + CreateChatCompletionRequestResponseFormatDiscriminator(class)[Chat], + CreateChatCompletionRequestResponseFormatDiscriminatorType(enum)[Chat], + Seed(int)[Chat], + CreateChatCompletionRequestServiceTier(enum)[Chat], + Stop(oneOf)[Chat], + StopVariant1(string)[Chat], + StopVariant2(array)[Chat], + StopVariant2Item(string)[Chat], + Stream(bool)[Chat], + ChatCompletionStreamOptions(ref)[Chat], + Temperature(double)[Chat], + TopP(double)[Chat], + Tools(array)[Chat], + ChatCompletionTool(ref)[Chat], + ChatCompletionToolChoiceOption(ref)[Chat], + ParallelToolCalls(ref)[Chat], + User(string)[Chat], + FunctionCall(oneOf)[Chat], + CreateChatCompletionRequestFunctionCall(enum)[Chat], + ChatCompletionFunctionCallOption(ref)[Chat], + Functions(array)[Chat], + ChatCompletionFunctions(ref)[Chat], + PredictionContent(class)[Chat], + PredictionContentType(enum)[Chat], + Content6(oneOf)[Chat], + ContentVariant1(string)[Chat], + ContentVariant2(array)[Chat], + ChatCompletionRequestMessageContentPartText(ref)[Chat], + CreateChatCompletionResponse(class)[Chat], + Id(string)[Chat], + Choices(array)[Chat], + CreateChatCompletionResponseChoice(class)[Chat], + CreateChatCompletionResponseChoiceFinishReason(enum)[Chat], + Index(int)[Chat], + ChatCompletionResponseMessage(ref)[Chat], + CreateChatCompletionResponseChoiceLogprobs(class)[Chat], + Content(array)[Chat], + ChatCompletionTokenLogprob(ref)[Chat], + Refusal(array)[Chat], + ChatCompletionTokenLogprob(ref)[Chat], + Created(int)[Chat], + Model(string)[Chat], + CreateChatCompletionResponseServiceTier(enum)[Chat], + SystemFingerprint(string)[Chat], + CreateChatCompletionResponseObject(enum)[Chat], + CompletionUsage(ref)[Chat], + CreateChatCompletionStreamResponse(class)[], + Id(string)[], + Choices(array)[], + CreateChatCompletionStreamResponseChoice(class)[], + ChatCompletionStreamResponseDelta(ref)[], + CreateChatCompletionStreamResponseChoiceLogprobs(class)[], + Content(array)[], + 
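
The CreateChatCompletionRequest / CreateChatCompletionResponse entries listed above map onto the /chat/completions wire format. Below is a minimal Python sketch of a request body assembled from those fields, assuming the REST API's snake_case names; the model value, image URL, and tool definition are placeholders for illustration only.

import json

# Illustrative /chat/completions request body built from the
# CreateChatCompletionRequest fields above (messages, model, temperature,
# max_completion_tokens, stream, tools). All values are examples.
chat_request = {
    "model": "gpt-4o-mini",                       # Model2 anyOf: string or known model enum
    "messages": [                                 # ChatCompletionRequestMessage oneOf
        {"role": "system", "content": "Answer in one sentence."},
        {                                         # user message with text + image content parts
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
            ],
        },
    ],
    "temperature": 0.2,                           # Temperature(double)
    "max_completion_tokens": 256,                 # MaxCompletionTokens(int)
    "stream": False,                              # Stream(bool)
    "tools": [                                    # ChatCompletionTool -> FunctionObject
        {
            "type": "function",
            "function": {                         # hypothetical function, for illustration
                "name": "get_weather",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
}

print(json.dumps(chat_request, indent=2))         # body POSTed to /chat/completions
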
ChatCompletionTokenLogprob(ref)[], + Refusal(array)[], + ChatCompletionTokenLogprob(ref)[], + CreateChatCompletionStreamResponseChoiceFinishReason(enum)[], + Index(int)[], + Created(int)[], + Model(string)[], + CreateChatCompletionStreamResponseServiceTier(enum)[], + SystemFingerprint(string)[], + CreateChatCompletionStreamResponseObject(enum)[], + CreateChatCompletionStreamResponseUsage(class)[], + CompletionTokens(int)[], + PromptTokens(int)[], + TotalTokens(int)[], + CreateCompletionRequest(class)[Completions], + Model3(anyOf)[Completions], + ModelVariant1(string)[Completions], + CreateCompletionRequestModel(enum)[Completions], + Prompt(oneOf)[Completions], + PromptVariant1(string)[Completions], + PromptVariant2(array)[Completions], + PromptVariant2Item(string)[Completions], + PromptVariant3(array)[Completions], + PromptVariant3Item(int)[Completions], + PromptVariant4(array)[Completions], + PromptVariant4Item(array)[Completions], + PromptVariant4ItemItem(int)[Completions], + BestOf(int)[Completions], + Echo(bool)[Completions], + FrequencyPenalty(double)[Completions], + CreateCompletionRequestLogitBias(class)[Completions], + LogitBias(int)[Completions], + Logprobs(int)[Completions], + MaxTokens(int)[Completions], + N(int)[Completions], + PresencePenalty(double)[Completions], + Seed(int)[Completions], + Stop2(oneOf)[Completions], + StopVariant1(string)[Completions], + StopVariant2(array)[Completions], + StopVariant2Item(string)[Completions], + Stream(bool)[Completions], + ChatCompletionStreamOptions(ref)[Completions], + Suffix(string)[Completions], + Temperature(double)[Completions], + TopP(double)[Completions], + User(string)[Completions], + CreateCompletionResponse(class)[Completions], + Id(string)[Completions], + Choices(array)[Completions], + CreateCompletionResponseChoice(class)[Completions], + CreateCompletionResponseChoiceFinishReason(enum)[Completions], + Index(int)[Completions], + CreateCompletionResponseChoiceLogprobs(class)[Completions], + TextOffset(array)[Completions], + TextOffsetItem(int)[Completions], + TokenLogprobs(array)[Completions], + TokenLogprobsItem(double)[Completions], + Tokens(array)[Completions], + TokensItem(string)[Completions], + TopLogprobs(array)[Completions], + CreateCompletionResponseChoiceLogprobsTopLogprob(class)[Completions], + TopLogprobsItem(double)[Completions], + Text(string)[Completions], + Created(int)[Completions], + Model(string)[Completions], + SystemFingerprint(string)[Completions], + CreateCompletionResponseObject(enum)[Completions], + CompletionUsage(ref)[Completions], + CreateEmbeddingRequest(class)[Embeddings], + Input(oneOf)[Embeddings], + InputVariant1(string)[Embeddings], + InputVariant2(array)[Embeddings], + InputVariant2Item(string)[Embeddings], + InputVariant3(array)[Embeddings], + InputVariant3Item(int)[Embeddings], + InputVariant4(array)[Embeddings], + InputVariant4Item(array)[Embeddings], + InputVariant4ItemItem(int)[Embeddings], + Model4(anyOf)[Embeddings], + ModelVariant1(string)[Embeddings], + CreateEmbeddingRequestModel(enum)[Embeddings], + CreateEmbeddingRequestEncodingFormat(enum)[Embeddings], + Dimensions(int)[Embeddings], + User(string)[Embeddings], + CreateEmbeddingResponse(class)[Embeddings], + Data(array)[Embeddings], + Embedding(ref)[Embeddings], + Model(string)[Embeddings], + CreateEmbeddingResponseObject(enum)[Embeddings], + CreateEmbeddingResponseUsage(class)[Embeddings], + PromptTokens(int)[Embeddings], + TotalTokens(int)[Embeddings], + Embedding(class)[Embeddings], + Index(int)[Embeddings], + 
Embedding1(array)[Embeddings], + Embedding1Item(double)[Embeddings], + EmbeddingObject(enum)[Embeddings], + CreateFileRequest(class)[Files], + File(byte[])[Files], + CreateFileRequestPurpose(enum)[Files], + CreateFineTuningJobRequest(class)[Fine-tuning], + Model5(anyOf)[Fine-tuning], + ModelVariant1(string)[Fine-tuning], + CreateFineTuningJobRequestModel(enum)[Fine-tuning], + TrainingFile(string)[Fine-tuning], + CreateFineTuningJobRequestHyperparameters(class)[Fine-tuning], + BatchSize(oneOf)[Fine-tuning], + CreateFineTuningJobRequestHyperparametersBatchSize(enum)[Fine-tuning], + BatchSizeVariant2(int)[Fine-tuning], + LearningRateMultiplier(oneOf)[Fine-tuning], + CreateFineTuningJobRequestHyperparametersLearningRateMultiplier(enum)[Fine-tuning], + LearningRateMultiplierVariant2(double)[Fine-tuning], + NEpochs(oneOf)[Fine-tuning], + CreateFineTuningJobRequestHyperparametersNEpochs(enum)[Fine-tuning], + NEpochsVariant2(int)[Fine-tuning], + Suffix(string)[Fine-tuning], + ValidationFile(string)[Fine-tuning], + Integrations(array)[Fine-tuning], + CreateFineTuningJobRequestIntegration(class)[Fine-tuning], + Type_AllOf1Wrapped(oneOf)[Fine-tuning], + CreateFineTuningJobRequestIntegrationType(enum)[Fine-tuning], + CreateFineTuningJobRequestIntegrationWandb(class)[Fine-tuning], + Project(string)[Fine-tuning], + Name(string)[Fine-tuning], + Entity(string)[Fine-tuning], + Tags(array)[Fine-tuning], + TagsItem(string)[Fine-tuning], + Seed(int)[Fine-tuning], + CreateImageEditRequest(class)[Images], + Image(byte[])[Images], + Prompt(string)[Images], + Mask(byte[])[Images], + Model6(anyOf)[Images], + ModelVariant1(string)[Images], + CreateImageEditRequestModel(enum)[Images], + N(int)[Images], + CreateImageEditRequestSize(enum)[Images], + CreateImageEditRequestResponseFormat(enum)[Images], + User(string)[Images], + CreateImageRequest(class)[Images], + Prompt(string)[Images], + Model7(anyOf)[Images], + ModelVariant1(string)[Images], + CreateImageRequestModel(enum)[Images], + N(int)[Images], + CreateImageRequestQuality(enum)[Images], + CreateImageRequestResponseFormat(enum)[Images], + CreateImageRequestSize(enum)[Images], + CreateImageRequestStyle(enum)[Images], + User(string)[Images], + CreateImageVariationRequest(class)[Images], + Image(byte[])[Images], + Model8(anyOf)[Images], + ModelVariant1(string)[Images], + CreateImageVariationRequestModel(enum)[Images], + N(int)[Images], + CreateImageVariationRequestResponseFormat(enum)[Images], + CreateImageVariationRequestSize(enum)[Images], + User(string)[Images], + CreateMessageRequest(class)[Assistants], + CreateMessageRequestRole(enum)[Assistants], + Content5(oneOf)[Assistants], + ContentVariant1(string)[Assistants], + ContentVariant2(array)[Assistants], + ContentVariant2Item(oneOf)[Assistants], + MessageContentImageFileObject(ref)[Assistants], + MessageContentImageUrlObject(ref)[Assistants], + MessageRequestContentTextObject(ref)[Assistants], + CreateMessageRequestContentVariant2ItemDiscriminator(class)[Assistants], + CreateMessageRequestContentVariant2ItemDiscriminatorType(enum)[Assistants], + Attachments(array)[Assistants], + CreateMessageRequestAttachment(class)[Assistants], + FileId(string)[Assistants], + Tools(array)[Assistants], + ToolsItem3(oneOf)[Assistants], + AssistantToolsCode(ref)[Assistants], + AssistantToolsFileSearchTypeOnly(ref)[Assistants], + CreateMessageRequestAttachmentToolDiscriminator(class)[Assistants], + CreateMessageRequestAttachmentToolDiscriminatorType(enum)[Assistants], + CreateMessageRequestMetadata(class)[Assistants], + 
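
CreateFineTuningJobRequest, listed above, carries oneOf hyperparameters (each either the literal "auto" or a number) plus optional integrations. A hedged sketch of the corresponding /fine_tuning/jobs body, assuming the REST wire names; file IDs, suffix, and the W&B project are placeholders.

import json

# Illustrative /fine_tuning/jobs request body based on the
# CreateFineTuningJobRequest entries above.
fine_tune_request = {
    "model": "gpt-4o-mini-2024-07-18",            # Model5 anyOf: string or enum value
    "training_file": "file-abc123",               # TrainingFile(string), placeholder id
    "validation_file": "file-def456",             # ValidationFile(string), placeholder id
    "hyperparameters": {                          # CreateFineTuningJobRequestHyperparameters
        "n_epochs": "auto",                       # NEpochs oneOf: "auto" or int
        "batch_size": 8,                          # BatchSize oneOf: "auto" or int
        "learning_rate_multiplier": 1.5,          # oneOf: "auto" or double
    },
    "integrations": [                             # CreateFineTuningJobRequestIntegration
        {
            "type": "wandb",                      # IntegrationType enum
            "wandb": {"project": "my-project", "tags": ["run-1"]},   # placeholder project
        }
    ],
    "seed": 42,                                   # Seed(int)
    "suffix": "support-bot",                      # Suffix(string), placeholder
}

print(json.dumps(fine_tune_request, indent=2))    # body POSTed to /fine_tuning/jobs
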
MessageRequestContentTextObject(class)[Assistants], + MessageRequestContentTextObjectType(enum)[Assistants], + Text(string)[Assistants], + CreateModerationRequest(class)[Moderations], + Input2(oneOf)[Moderations], + InputVariant1(string)[Moderations], + InputVariant2(array)[Moderations], + InputVariant2Item(string)[Moderations], + InputVariant3(array)[Moderations], + InputVariant3Item(oneOf)[Moderations], + CreateModerationRequestInputVariant3ItemVariant1(class)[Moderations], + CreateModerationRequestInputVariant3ItemVariant1Type(enum)[Moderations], + CreateModerationRequestInputVariant3ItemVariant1ImageUrl(class)[Moderations], + Url(Uri)[Moderations], + CreateModerationRequestInputVariant3ItemVariant2(class)[Moderations], + CreateModerationRequestInputVariant3ItemVariant2Type(enum)[Moderations], + Text(string)[Moderations], + CreateModerationRequestInputVariant3ItemDiscriminator(class)[Moderations], + CreateModerationRequestInputVariant3ItemDiscriminatorType(enum)[Moderations], + Model9(anyOf)[Moderations], + ModelVariant1(string)[Moderations], + CreateModerationRequestModel(enum)[Moderations], + CreateModerationResponse(class)[Moderations], + Id(string)[Moderations], + Model(string)[Moderations], + Results(array)[Moderations], + CreateModerationResponseResult(class)[Moderations], + Flagged(bool)[Moderations], + CreateModerationResponseResultCategories(class)[Moderations], + Hate(bool)[Moderations], + HateThreatening(bool)[Moderations], + Harassment(bool)[Moderations], + HarassmentThreatening(bool)[Moderations], + Illicit(bool)[Moderations], + IllicitViolent(bool)[Moderations], + SelfHarm(bool)[Moderations], + SelfHarmIntent(bool)[Moderations], + SelfHarmInstructions(bool)[Moderations], + Sexual(bool)[Moderations], + SexualMinors(bool)[Moderations], + Violence(bool)[Moderations], + ViolenceGraphic(bool)[Moderations], + CreateModerationResponseResultCategoryScores(class)[Moderations], + Hate(double)[Moderations], + HateThreatening(double)[Moderations], + Harassment(double)[Moderations], + HarassmentThreatening(double)[Moderations], + Illicit(double)[Moderations], + IllicitViolent(double)[Moderations], + SelfHarm(double)[Moderations], + SelfHarmIntent(double)[Moderations], + SelfHarmInstructions(double)[Moderations], + Sexual(double)[Moderations], + SexualMinors(double)[Moderations], + Violence(double)[Moderations], + ViolenceGraphic(double)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypes(class)[Moderations], + Hate(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesHateItem(enum)[Moderations], + HateThreatening(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem(enum)[Moderations], + Harassment(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem(enum)[Moderations], + HarassmentThreatening(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem(enum)[Moderations], + Illicit(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem(enum)[Moderations], + IllicitViolent(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem(enum)[Moderations], + SelfHarm(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem(enum)[Moderations], + SelfHarmIntent(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem(enum)[Moderations], + SelfHarmInstructions(array)[Moderations], 
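
CreateModerationRequest above adds a third input variant: an array mixing text and image_url parts, with per-category applied-input-type arrays in the response. A hedged sketch of a multimodal request body, assuming the REST wire names; the image URL is a placeholder.

import json

# Illustrative /moderations request body using the multimodal input
# variant (InputVariant3) from the listing above.
moderation_request = {
    "model": "omni-moderation-latest",            # Model9 anyOf: string or enum value
    "input": [
        {"type": "text", "text": "user-provided text to classify"},
        {"type": "image_url", "image_url": {"url": "https://example.com/photo.jpg"}},
    ],
}

print(json.dumps(moderation_request, indent=2))   # body POSTed to /moderations
# The response's results[*].category_applied_input_types then reports, per
# category, whether text, image, or both contributed to the score.
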
+ CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction(enum)[Moderations], + Sexual(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesSexualItem(enum)[Moderations], + SexualMinors(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor(enum)[Moderations], + Violence(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem(enum)[Moderations], + ViolenceGraphic(array)[Moderations], + CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem(enum)[Moderations], + CreateRunRequest(class)[Assistants], + AssistantId(string)[Assistants], + Model10(anyOf)[Assistants], + ModelVariant1(string)[Assistants], + CreateRunRequestModel(enum)[Assistants], + Instructions(string)[Assistants], + AdditionalInstructions(string)[Assistants], + AdditionalMessages(array)[Assistants], + CreateMessageRequest(ref)[Assistants], + Tools(array)[Assistants], + ToolsItem4(oneOf)[Assistants], + AssistantToolsCode(ref)[Assistants], + AssistantToolsFileSearch(ref)[Assistants], + AssistantToolsFunction(ref)[Assistants], + CreateRunRequestToolDiscriminator(class)[Assistants], + CreateRunRequestToolDiscriminatorType(enum)[Assistants], + CreateRunRequestMetadata(class)[Assistants], + Temperature(double)[Assistants], + TopP(double)[Assistants], + Stream(bool)[Assistants], + MaxPromptTokens(int)[Assistants], + MaxCompletionTokens(int)[Assistants], + TruncationObject(ref)[Assistants], + AssistantsApiToolChoiceOption(ref)[Assistants], + ParallelToolCalls(ref)[Assistants], + AssistantsApiResponseFormatOption(ref)[Assistants], + CreateSpeechRequest(class)[Audio], + Model11(anyOf)[Audio], + ModelVariant1(string)[Audio], + CreateSpeechRequestModel(enum)[Audio], + Input(string)[Audio], + CreateSpeechRequestVoice(enum)[Audio], + CreateSpeechRequestResponseFormat(enum)[Audio], + Speed(double)[Audio], + CreateThreadAndRunRequest(class)[Assistants], + AssistantId(string)[Assistants], + CreateThreadRequest(ref)[Assistants], + Model12(anyOf)[Assistants], + ModelVariant1(string)[Assistants], + CreateThreadAndRunRequestModel(enum)[Assistants], + Instructions(string)[Assistants], + Tools(array)[Assistants], + ToolsItem5(oneOf)[Assistants], + AssistantToolsCode(ref)[Assistants], + AssistantToolsFileSearch(ref)[Assistants], + AssistantToolsFunction(ref)[Assistants], + CreateThreadAndRunRequestToolDiscriminator(class)[Assistants], + CreateThreadAndRunRequestToolDiscriminatorType(enum)[Assistants], + CreateThreadAndRunRequestToolResources(class)[Assistants], + CreateThreadAndRunRequestToolResourcesCodeInterpreter(class)[Assistants], + FileIds(array)[Assistants], + FileIdsItem(string)[Assistants], + CreateThreadAndRunRequestToolResourcesFileSearch(class)[Assistants], + VectorStoreIds(array)[Assistants], + VectorStoreIdsItem(string)[Assistants], + CreateThreadAndRunRequestMetadata(class)[Assistants], + Temperature(double)[Assistants], + TopP(double)[Assistants], + Stream(bool)[Assistants], + MaxPromptTokens(int)[Assistants], + MaxCompletionTokens(int)[Assistants], + TruncationObject(ref)[Assistants], + AssistantsApiToolChoiceOption(ref)[Assistants], + ParallelToolCalls(ref)[Assistants], + AssistantsApiResponseFormatOption(ref)[Assistants], + CreateThreadRequest(class)[Assistants], + Messages(array)[Assistants], + CreateMessageRequest(ref)[Assistants], + CreateThreadRequestToolResources(class)[Assistants], + CreateThreadRequestToolResourcesCodeInterpreter(class)[Assistants], + FileIds(array)[Assistants], + 
FileIdsItem(string)[Assistants], + CreateThreadRequestToolResourcesFileSearch(class)[Assistants], + VectorStoreIds(array)[Assistants], + VectorStoreIdsItem(string)[Assistants], + VectorStores(array)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStore(class)[Assistants], + FileIds(array)[Assistants], + FileIdsItem(string)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy(class)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1(class)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type(enum)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2(class)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type(enum)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static(class)[Assistants], + MaxChunkSizeTokens(int)[Assistants], + ChunkOverlapTokens(int)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator(class)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType(enum)[Assistants], + CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata(class)[Assistants], + CreateThreadRequestToolResourcesFileSearchVariant1(class)[Assistants], + CreateThreadRequestToolResourcesFileSearchVariant2(class)[Assistants], + CreateThreadRequestMetadata(class)[Assistants], + CreateTranscriptionRequest(class)[Audio], + File(byte[])[Audio], + Model13(anyOf)[Audio], + ModelVariant1(string)[Audio], + CreateTranscriptionRequestModel(enum)[Audio], + Language(string)[Audio], + Prompt(string)[Audio], + AudioResponseFormat(ref)[Audio], + Temperature(double)[Audio], + TimestampGranularities(array)[Audio], + CreateTranscriptionRequestTimestampGranularitie(enum)[Audio], + CreateTranscriptionResponseJson(class)[Audio], + Text(string)[Audio], + CreateTranscriptionResponseVerboseJson(class)[Audio], + Language(string)[Audio], + Duration(string)[Audio], + Text(string)[Audio], + Words(array)[Audio], + TranscriptionWord(ref)[Audio], + Segments(array)[Audio], + TranscriptionSegment(ref)[Audio], + TranscriptionWord(class)[Audio], + Word(string)[Audio], + Start(float)[Audio], + End(float)[Audio], + TranscriptionSegment(class)[Audio], + Id(int)[Audio], + Seek(int)[Audio], + Start(float)[Audio], + End(float)[Audio], + Text(string)[Audio], + Tokens(array)[Audio], + TokensItem(int)[Audio], + Temperature(float)[Audio], + AvgLogprob(float)[Audio], + CompressionRatio(float)[Audio], + NoSpeechProb(float)[Audio], + CreateTranslationRequest(class)[Audio], + File(byte[])[Audio], + Model14(anyOf)[Audio], + ModelVariant1(string)[Audio], + CreateTranslationRequestModel(enum)[Audio], + Prompt(string)[Audio], + AudioResponseFormat(ref)[Audio], + Temperature(double)[Audio], + CreateTranslationResponseJson(class)[Audio], + Text(string)[Audio], + CreateTranslationResponseVerboseJson(class)[Audio], + Language(string)[Audio], + Duration(string)[Audio], + Text(string)[Audio], + Segments(array)[Audio], + TranscriptionSegment(ref)[Audio], + CreateUploadRequest(class)[Uploads], + Filename(string)[Uploads], + CreateUploadRequestPurpose(enum)[Uploads], + Bytes(int)[Uploads], + MimeType(string)[Uploads], + CreateVectorStoreFileBatchRequest(class)[Vector stores], + FileIds(array)[Vector stores], + FileIdsItem(string)[Vector stores], + ChunkingStrategyRequestParam(ref)[Vector stores], + 
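
CreateTranscriptionRequest above is a multipart form rather than JSON (note the File(byte[]) entry), and the verbose_json response format exposes the TranscriptionWord / TranscriptionSegment shapes also listed. A hedged sketch of the non-file form fields, assuming the REST wire names.

# Illustrative form fields for /audio/transcriptions, matching the
# CreateTranscriptionRequest entries above. Shown as a plain dict for
# clarity; in a real request these are multipart form-data fields.
transcription_fields = {
    "model": "whisper-1",                         # Model13 anyOf
    "language": "en",                             # Language(string), ISO-639-1 hint
    "prompt": "Names: Ada, Grace.",               # Prompt(string), optional biasing text
    "response_format": "verbose_json",            # AudioResponseFormat -> words/segments included
    "temperature": 0.0,                           # Temperature(double)
    "timestamp_granularities[]": ["word", "segment"],  # TimestampGranularities(array)
}

# The audio itself travels as the multipart "file" part (File(byte[]) above),
# e.g. files={"file": open("meeting.wav", "rb")} with an HTTP client; the
# filename here is a placeholder.
print(transcription_fields)
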
CreateVectorStoreFileRequest(class)[Vector stores], + FileId(string)[Vector stores], + ChunkingStrategyRequestParam(ref)[Vector stores], + CreateVectorStoreRequest(class)[Vector stores], + FileIds(array)[Vector stores], + FileIdsItem(string)[Vector stores], + Name(string)[Vector stores], + VectorStoreExpirationAfter(ref)[Vector stores], + CreateVectorStoreRequestChunkingStrategy(class)[Vector stores], + AutoChunkingStrategyRequestParam(ref)[Vector stores], + StaticChunkingStrategyRequestParam(ref)[Vector stores], + CreateVectorStoreRequestChunkingStrategyDiscriminator(class)[Vector stores], + CreateVectorStoreRequestChunkingStrategyDiscriminatorType(enum)[Vector stores], + CreateVectorStoreRequestMetadata(class)[Vector stores], + VectorStoreExpirationAfter(class)[Vector stores], + VectorStoreExpirationAfterAnchor(enum)[Vector stores], + Days(int)[Vector stores], + DefaultProjectErrorResponse(class)[], + Code(int)[], + Message(string)[], + DeleteAssistantResponse(class)[Assistants], + Id(string)[Assistants], + Deleted(bool)[Assistants], + DeleteAssistantResponseObject(enum)[Assistants], + DeleteFileResponse(class)[Files], + Id(string)[Files], + DeleteFileResponseObject(enum)[Files], + Deleted(bool)[Files], + DeleteMessageResponse(class)[Assistants], + Id(string)[Assistants], + Deleted(bool)[Assistants], + DeleteMessageResponseObject(enum)[Assistants], + DeleteModelResponse(class)[Models], + Id(string)[Models], + Deleted(bool)[Models], + Object(string)[Models], + DeleteThreadResponse(class)[Assistants], + Id(string)[Assistants], + Deleted(bool)[Assistants], + DeleteThreadResponseObject(enum)[Assistants], + DeleteVectorStoreFileResponse(class)[Vector stores], + Id(string)[Vector stores], + Deleted(bool)[Vector stores], + DeleteVectorStoreFileResponseObject(enum)[Vector stores], + DeleteVectorStoreResponse(class)[Vector stores], + Id(string)[Vector stores], + Deleted(bool)[Vector stores], + DeleteVectorStoreResponseObject(enum)[Vector stores], + ErrorResponse(class)[Projects], + Error(ref)[Projects], + FineTuneChatCompletionRequestAssistantMessage(allOf)[], + FineTuneChatCompletionRequestAssistantMessageVariant1(class)[], + Weight(int)[], + ChatCompletionRequestAssistantMessage(ref)[], + FineTuningIntegration(class)[Fine-tuning], + FineTuningIntegrationType(enum)[Fine-tuning], + FineTuningIntegrationWandb(class)[Fine-tuning], + Project(string)[Fine-tuning], + Name(string)[Fine-tuning], + Entity(string)[Fine-tuning], + Tags(array)[Fine-tuning], + TagsItem(string)[Fine-tuning], + FineTuningJob(class)[Fine-tuning], + Id(string)[Fine-tuning], + CreatedAt(int)[Fine-tuning], + FineTuningJobError(class)[Fine-tuning], + Code(string)[Fine-tuning], + Message(string)[Fine-tuning], + Param(string)[Fine-tuning], + FineTunedModel(string)[Fine-tuning], + FinishedAt(int)[Fine-tuning], + FineTuningJobHyperparameters(class)[Fine-tuning], + NEpochs2(oneOf)[Fine-tuning], + FineTuningJobHyperparametersNEpochs(enum)[Fine-tuning], + NEpochsVariant2(int)[Fine-tuning], + Model(string)[Fine-tuning], + FineTuningJobObject(enum)[Fine-tuning], + OrganizationId(string)[Fine-tuning], + ResultFiles(array)[Fine-tuning], + ResultFilesItem(string)[Fine-tuning], + FineTuningJobStatus(enum)[Fine-tuning], + TrainedTokens(int)[Fine-tuning], + TrainingFile(string)[Fine-tuning], + ValidationFile(string)[Fine-tuning], + Integrations(array)[Fine-tuning], + IntegrationsItem(oneOf)[Fine-tuning], + FineTuningIntegration(ref)[Fine-tuning], + Seed(int)[Fine-tuning], + EstimatedFinish(int)[Fine-tuning], + 
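
CreateVectorStoreRequest above combines file IDs with a chunking_strategy (auto, or static with MaxChunkSizeTokens / ChunkOverlapTokens) and a VectorStoreExpirationAfter policy. A hedged sketch of the request body, assuming the REST wire names; file IDs and the store name are placeholders.

import json

# Illustrative /vector_stores request body based on the
# CreateVectorStoreRequest entries above.
vector_store_request = {
    "name": "support-docs",                       # Name(string), placeholder
    "file_ids": ["file-abc123", "file-def456"],   # FileIds(array), placeholder ids
    "chunking_strategy": {                        # CreateVectorStoreRequestChunkingStrategy
        "type": "static",                         # discriminator type: "auto" | "static"
        "static": {                               # StaticChunkingStrategy
            "max_chunk_size_tokens": 800,         # MaxChunkSizeTokens(int)
            "chunk_overlap_tokens": 400,          # ChunkOverlapTokens(int)
        },
    },
    "expires_after": {                            # VectorStoreExpirationAfter
        "anchor": "last_active_at",               # VectorStoreExpirationAfterAnchor(enum)
        "days": 7,                                # Days(int)
    },
}

print(json.dumps(vector_store_request, indent=2))  # body POSTed to /vector_stores
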
FineTuningJobCheckpoint(class)[Fine-tuning], + Id(string)[Fine-tuning], + CreatedAt(int)[Fine-tuning], + FineTunedModelCheckpoint(string)[Fine-tuning], + StepNumber(int)[Fine-tuning], + FineTuningJobCheckpointMetrics(class)[Fine-tuning], + Step(double)[Fine-tuning], + TrainLoss(double)[Fine-tuning], + TrainMeanTokenAccuracy(double)[Fine-tuning], + ValidLoss(double)[Fine-tuning], + ValidMeanTokenAccuracy(double)[Fine-tuning], + FullValidLoss(double)[Fine-tuning], + FullValidMeanTokenAccuracy(double)[Fine-tuning], + FineTuningJobId(string)[Fine-tuning], + FineTuningJobCheckpointObject(enum)[Fine-tuning], + FineTuningJobEvent(class)[Fine-tuning], + Id(string)[Fine-tuning], + CreatedAt(int)[Fine-tuning], + FineTuningJobEventLevel(enum)[Fine-tuning], + Message(string)[Fine-tuning], + FineTuningJobEventObject(enum)[Fine-tuning], + FinetuneChatRequestInput(class)[], + Messages(array)[], + MessagesItem(oneOf)[], + ChatCompletionRequestSystemMessage(ref)[], + ChatCompletionRequestUserMessage(ref)[], + FineTuneChatCompletionRequestAssistantMessage(ref)[], + ChatCompletionRequestToolMessage(ref)[], + ChatCompletionRequestFunctionMessage(ref)[], + Tools(array)[], + ChatCompletionTool(ref)[], + ParallelToolCalls(ref)[], + Functions(array)[], + ChatCompletionFunctions(ref)[], + FinetuneCompletionRequestInput(class)[], + Prompt(string)[], + Completion(string)[], + Image(class)[Images], + B64Json(string)[Images], + Url(string)[Images], + RevisedPrompt(string)[Images], + ImagesResponse(class)[Images], + Created(int)[Images], + Data(array)[Images], + Image(ref)[Images], Invite(class)[Invites], InviteObject(enum)[Invites], Id(string)[Invites], @@ -1611,6 +1521,10 @@ InvitedAt(int)[Invites], ExpiresAt(int)[Invites], AcceptedAt(int)[Invites], + InviteDeleteResponse(class)[Invites], + InviteDeleteResponseObject(enum)[Invites], + Id(string)[Invites], + Deleted(bool)[Invites], InviteListResponse(class)[Invites], InviteListResponseObject(enum)[Invites], Data(array)[Invites], @@ -1621,30 +1535,184 @@ InviteRequest(class)[Invites], Email(string)[Invites], InviteRequestRole(enum)[Invites], - InviteDeleteResponse(class)[Invites], - InviteDeleteResponseObject(enum)[Invites], - Id(string)[Invites], - Deleted(bool)[Invites], - User(class)[Users], - UserObject(enum)[Users], - Id(string)[Users], - Name(string)[Users], - Email(string)[Users], - UserRole(enum)[Users], - AddedAt(int)[Users], - UserListResponse(class)[Users], - UserListResponseObject(enum)[Users], - Data(array)[Users], - User(ref)[Users], - FirstId(string)[Users], - LastId(string)[Users], - HasMore(bool)[Users], - UserRoleUpdateRequest(class)[Users], - UserRoleUpdateRequestRole(enum)[Users], - UserDeleteResponse(class)[Users], - UserDeleteResponseObject(enum)[Users], - Id(string)[Users], - Deleted(bool)[Users], + ListAssistantsResponse(class)[Assistants], + Object(string)[Assistants], + Data(array)[Assistants], + AssistantObject(ref)[Assistants], + FirstId(string)[Assistants], + LastId(string)[Assistants], + HasMore(bool)[Assistants], + ListAuditLogsResponse(class)[Audit Logs], + ListAuditLogsResponseObject(enum)[Audit Logs], + Data(array)[Audit Logs], + AuditLog(ref)[Audit Logs], + FirstId(string)[Audit Logs], + LastId(string)[Audit Logs], + HasMore(bool)[Audit Logs], + ListBatchesResponse(class)[Batch], + Data(array)[Batch], + Batch(ref)[Batch], + FirstId(string)[Batch], + LastId(string)[Batch], + HasMore(bool)[Batch], + ListBatchesResponseObject(enum)[Batch], + ListFilesResponse(class)[Files], + Object(string)[Files], + Data(array)[Files], + 
OpenAIFile(ref)[Files], + FirstId(string)[Files], + LastId(string)[Files], + HasMore(bool)[Files], + OpenAIFile(class)[Files, Uploads], + Id(string)[Files, Uploads], + Bytes(int)[Files, Uploads], + CreatedAt(int)[Files, Uploads], + Filename(string)[Files, Uploads], + OpenAIFileObject(enum)[Files, Uploads], + OpenAIFilePurpose(enum)[Files, Uploads], + OpenAIFileStatus(enum)[Files, Uploads], + StatusDetails(string)[Files, Uploads], + ListFineTuningJobCheckpointsResponse(class)[Fine-tuning], + Data(array)[Fine-tuning], + FineTuningJobCheckpoint(ref)[Fine-tuning], + ListFineTuningJobCheckpointsResponseObject(enum)[Fine-tuning], + FirstId(string)[Fine-tuning], + LastId(string)[Fine-tuning], + HasMore(bool)[Fine-tuning], + ListFineTuningJobEventsResponse(class)[Fine-tuning], + Data(array)[Fine-tuning], + FineTuningJobEvent(ref)[Fine-tuning], + ListFineTuningJobEventsResponseObject(enum)[Fine-tuning], + ListMessagesResponse(class)[Assistants], + Object(string)[Assistants], + Data(array)[Assistants], + MessageObject(ref)[Assistants], + FirstId(string)[Assistants], + LastId(string)[Assistants], + HasMore(bool)[Assistants], + ListModelsResponse(class)[Models], + ListModelsResponseObject(enum)[Models], + Data(array)[Models], + Model15(ref)[Models], + Model15(class)[Models], + Id(string)[Models], + Created(int)[Models], + ModelObject(enum)[Models], + OwnedBy(string)[Models], + ListPaginatedFineTuningJobsResponse(class)[Fine-tuning], + Data(array)[Fine-tuning], + FineTuningJob(ref)[Fine-tuning], + HasMore(bool)[Fine-tuning], + ListPaginatedFineTuningJobsResponseObject(enum)[Fine-tuning], + ListRunStepsResponse(class)[Assistants], + Object(string)[Assistants], + Data(array)[Assistants], + RunStepObject(ref)[Assistants], + FirstId(string)[Assistants], + LastId(string)[Assistants], + HasMore(bool)[Assistants], + ListRunsResponse(class)[Assistants], + Object(string)[Assistants], + Data(array)[Assistants], + RunObject(ref)[Assistants], + FirstId(string)[Assistants], + LastId(string)[Assistants], + HasMore(bool)[Assistants], + ListThreadsResponse(class)[], + Object(string)[], + Data(array)[], + ThreadObject(ref)[], + FirstId(string)[], + LastId(string)[], + HasMore(bool)[], + ListVectorStoreFilesResponse(class)[Vector stores], + Object(string)[Vector stores], + Data(array)[Vector stores], + VectorStoreFileObject(ref)[Vector stores], + FirstId(string)[Vector stores], + LastId(string)[Vector stores], + HasMore(bool)[Vector stores], + VectorStoreFileObject(class)[Vector stores], + Id(string)[Vector stores], + VectorStoreFileObjectObject(enum)[Vector stores], + UsageBytes(int)[Vector stores], + CreatedAt(int)[Vector stores], + VectorStoreId(string)[Vector stores], + VectorStoreFileObjectStatus(enum)[Vector stores], + VectorStoreFileObjectLastError(class)[Vector stores], + VectorStoreFileObjectLastErrorCode(enum)[Vector stores], + Message(string)[Vector stores], + VectorStoreFileObjectChunkingStrategy(class)[Vector stores], + StaticChunkingStrategyResponseParam(ref)[Vector stores], + OtherChunkingStrategyResponseParam(ref)[Vector stores], + VectorStoreFileObjectChunkingStrategyDiscriminator(class)[Vector stores], + VectorStoreFileObjectChunkingStrategyDiscriminatorType(enum)[Vector stores], + StaticChunkingStrategyResponseParam(class)[Vector stores], + StaticChunkingStrategyResponseParamType(enum)[Vector stores], + StaticChunkingStrategy(ref)[Vector stores], + OtherChunkingStrategyResponseParam(class)[Vector stores], + OtherChunkingStrategyResponseParamType(enum)[Vector stores], + 
ListVectorStoresResponse(class)[Vector stores], + Object(string)[Vector stores], + Data(array)[Vector stores], + VectorStoreObject(ref)[Vector stores], + FirstId(string)[Vector stores], + LastId(string)[Vector stores], + HasMore(bool)[Vector stores], + VectorStoreObject(class)[Vector stores], + Id(string)[Vector stores], + VectorStoreObjectObject(enum)[Vector stores], + CreatedAt(int)[Vector stores], + Name(string)[Vector stores], + UsageBytes(int)[Vector stores], + VectorStoreObjectFileCounts(class)[Vector stores], + InProgress(int)[Vector stores], + Completed(int)[Vector stores], + Failed(int)[Vector stores], + Cancelled(int)[Vector stores], + Total(int)[Vector stores], + VectorStoreObjectStatus(enum)[Vector stores], + VectorStoreExpirationAfter(ref)[Vector stores], + ExpiresAt(int)[Vector stores], + LastActiveAt(int)[Vector stores], + VectorStoreObjectMetadata(class)[Vector stores], + ModifyAssistantRequest(class)[Assistants], + Model_AllOf1Wrapped(anyOf)[Assistants], + ModelVariant1(string)[Assistants], + Name(string)[Assistants], + Description(string)[Assistants], + Instructions(string)[Assistants], + Tools(array)[Assistants], + ToolsItem7(oneOf)[Assistants], + AssistantToolsCode(ref)[Assistants], + AssistantToolsFileSearch(ref)[Assistants], + AssistantToolsFunction(ref)[Assistants], + ModifyAssistantRequestToolDiscriminator(class)[Assistants], + ModifyAssistantRequestToolDiscriminatorType(enum)[Assistants], + ModifyAssistantRequestToolResources(class)[Assistants], + ModifyAssistantRequestToolResourcesCodeInterpreter(class)[Assistants], + FileIds(array)[Assistants], + FileIdsItem(string)[Assistants], + ModifyAssistantRequestToolResourcesFileSearch(class)[Assistants], + VectorStoreIds(array)[Assistants], + VectorStoreIdsItem(string)[Assistants], + ModifyAssistantRequestMetadata(class)[Assistants], + Temperature(double)[Assistants], + TopP(double)[Assistants], + AssistantsApiResponseFormatOption(ref)[Assistants], + ModifyMessageRequest(class)[Assistants], + ModifyMessageRequestMetadata(class)[Assistants], + ModifyRunRequest(class)[Assistants], + ModifyRunRequestMetadata(class)[Assistants], + ModifyThreadRequest(class)[Assistants], + ModifyThreadRequestToolResources(class)[Assistants], + ModifyThreadRequestToolResourcesCodeInterpreter(class)[Assistants], + FileIds(array)[Assistants], + FileIdsItem(string)[Assistants], + ModifyThreadRequestToolResourcesFileSearch(class)[Assistants], + VectorStoreIds(array)[Assistants], + VectorStoreIdsItem(string)[Assistants], + ModifyThreadRequestMetadata(class)[Assistants], Project(class)[Projects], Id(string)[Projects], ProjectObject(enum)[Projects], @@ -1652,20 +1720,16 @@ CreatedAt(int)[Projects], ArchivedAt(int)[Projects], ProjectStatus(enum)[Projects], - ProjectListResponse(class)[Projects], - ProjectListResponseObject(enum)[Projects], - Data(array)[Projects], - Project(ref)[Projects], - FirstId(string)[Projects], - LastId(string)[Projects], - HasMore(bool)[Projects], - ProjectCreateRequest(class)[Projects], - Name(string)[Projects], - ProjectUpdateRequest(class)[Projects], + ProjectApiKey(class)[Projects], + ProjectApiKeyObject(enum)[Projects], + RedactedValue(string)[Projects], Name(string)[Projects], - DefaultProjectErrorResponse(class)[], - Code(int)[], - Message(string)[], + CreatedAt(int)[Projects], + Id(string)[Projects], + ProjectApiKeyOwner(class)[Projects], + ProjectApiKeyOwnerType(enum)[Projects], + ProjectUser(ref)[Projects], + ProjectServiceAccount(ref)[Projects], ProjectUser(class)[Projects], ProjectUserObject(enum)[Projects], 
Id(string)[Projects], @@ -1673,35 +1737,62 @@ Email(string)[Projects], ProjectUserRole(enum)[Projects], AddedAt(int)[Projects], - ProjectUserListResponse(class)[Projects], - Object(string)[Projects], - Data(array)[Projects], - ProjectUser(ref)[Projects], - FirstId(string)[Projects], - LastId(string)[Projects], - HasMore(bool)[Projects], - ProjectUserCreateRequest(class)[Projects], - UserId(string)[Projects], - ProjectUserCreateRequestRole(enum)[Projects], - ProjectUserUpdateRequest(class)[Projects], - ProjectUserUpdateRequestRole(enum)[Projects], - ProjectUserDeleteResponse(class)[Projects], - ProjectUserDeleteResponseObject(enum)[Projects], - Id(string)[Projects], - Deleted(bool)[Projects], ProjectServiceAccount(class)[Projects], ProjectServiceAccountObject(enum)[Projects], Id(string)[Projects], Name(string)[Projects], ProjectServiceAccountRole(enum)[Projects], CreatedAt(int)[Projects], - ProjectServiceAccountListResponse(class)[Projects], - ProjectServiceAccountListResponseObject(enum)[Projects], + ProjectApiKeyDeleteResponse(class)[Projects], + ProjectApiKeyDeleteResponseObject(enum)[Projects], + Id(string)[Projects], + Deleted(bool)[Projects], + ProjectApiKeyListResponse(class)[Projects], + ProjectApiKeyListResponseObject(enum)[Projects], Data(array)[Projects], - ProjectServiceAccount(ref)[Projects], + ProjectApiKey(ref)[Projects], + FirstId(string)[Projects], + LastId(string)[Projects], + HasMore(bool)[Projects], + ProjectCreateRequest(class)[Projects], + Name(string)[Projects], + ProjectListResponse(class)[Projects], + ProjectListResponseObject(enum)[Projects], + Data(array)[Projects], + Project(ref)[Projects], + FirstId(string)[Projects], + LastId(string)[Projects], + HasMore(bool)[Projects], + ProjectRateLimit(class)[Projects], + ProjectRateLimitObject(enum)[Projects], + Id(string)[Projects], + Model(string)[Projects], + MaxRequestsPer1Minute(int)[Projects], + MaxTokensPer1Minute(int)[Projects], + MaxImagesPer1Minute(int)[Projects], + MaxAudioMegabytesPer1Minute(int)[Projects], + MaxRequestsPer1Day(int)[Projects], + Batch1DayMaxInputTokens(int)[Projects], + ProjectRateLimitListResponse(class)[Projects], + ProjectRateLimitListResponseObject(enum)[Projects], + Data(array)[Projects], + ProjectRateLimit(ref)[Projects], FirstId(string)[Projects], LastId(string)[Projects], HasMore(bool)[Projects], + ProjectRateLimitUpdateRequest(class)[Projects], + MaxRequestsPer1Minute(int)[Projects], + MaxTokensPer1Minute(int)[Projects], + MaxImagesPer1Minute(int)[Projects], + MaxAudioMegabytesPer1Minute(int)[Projects], + MaxRequestsPer1Day(int)[Projects], + Batch1DayMaxInputTokens(int)[Projects], + ProjectServiceAccountApiKey(class)[Projects], + ProjectServiceAccountApiKeyObject(enum)[Projects], + Value(string)[Projects], + Name(string)[Projects], + CreatedAt(int)[Projects], + Id(string)[Projects], ProjectServiceAccountCreateRequest(class)[Projects], Name(string)[Projects], ProjectServiceAccountCreateResponse(class)[Projects], @@ -1711,105 +1802,723 @@ ProjectServiceAccountCreateResponseRole(enum)[Projects], CreatedAt(int)[Projects], ProjectServiceAccountApiKey(ref)[Projects], - ProjectServiceAccountApiKey(class)[Projects], - ProjectServiceAccountApiKeyObject(enum)[Projects], - Value(string)[Projects], - Name(string)[Projects], - CreatedAt(int)[Projects], - Id(string)[Projects], ProjectServiceAccountDeleteResponse(class)[Projects], ProjectServiceAccountDeleteResponseObject(enum)[Projects], Id(string)[Projects], Deleted(bool)[Projects], - ProjectApiKey(class)[Projects], - 
ProjectApiKeyObject(enum)[Projects], - RedactedValue(string)[Projects], - Name(string)[Projects], - CreatedAt(int)[Projects], - Id(string)[Projects], - ProjectApiKeyOwner(class)[Projects], - ProjectApiKeyOwnerType(enum)[Projects], - ProjectUser(ref)[Projects], - ProjectServiceAccount(ref)[Projects], - ProjectApiKeyListResponse(class)[Projects], - ProjectApiKeyListResponseObject(enum)[Projects], + ProjectServiceAccountListResponse(class)[Projects], + ProjectServiceAccountListResponseObject(enum)[Projects], Data(array)[Projects], - ProjectApiKey(ref)[Projects], + ProjectServiceAccount(ref)[Projects], FirstId(string)[Projects], LastId(string)[Projects], HasMore(bool)[Projects], - ProjectApiKeyDeleteResponse(class)[Projects], - ProjectApiKeyDeleteResponseObject(enum)[Projects], + ProjectUpdateRequest(class)[Projects], + Name(string)[Projects], + ProjectUserCreateRequest(class)[Projects], + UserId(string)[Projects], + ProjectUserCreateRequestRole(enum)[Projects], + ProjectUserDeleteResponse(class)[Projects], + ProjectUserDeleteResponseObject(enum)[Projects], Id(string)[Projects], Deleted(bool)[Projects], - CreateChatCompletionRequest(ref)[Chat], - CreateCompletionRequest(ref)[Completions], - CreateImageRequest(ref)[Images], - CreateImageEditRequest(ref)[Images], - CreateImageVariationRequest(ref)[Images], - CreateEmbeddingRequest(ref)[Embeddings], + ProjectUserListResponse(class)[Projects], + Object(string)[Projects], + Data(array)[Projects], + ProjectUser(ref)[Projects], + FirstId(string)[Projects], + LastId(string)[Projects], + HasMore(bool)[Projects], + ProjectUserUpdateRequest(class)[Projects], + ProjectUserUpdateRequestRole(enum)[Projects], + RealtimeClientEventConversationItemCreate(class)[], + EventId(string)[], + RealtimeClientEventConversationItemCreateType(enum)[], + PreviousItemId(string)[], + RealtimeConversationItem(ref)[], + RealtimeConversationItem(class)[], + Id(string)[], + RealtimeConversationItemType(enum)[], + RealtimeConversationItemObject(enum)[], + RealtimeConversationItemStatus(enum)[], + RealtimeConversationItemRole(enum)[], + Content(array)[], + RealtimeConversationItemContentItem(class)[], + RealtimeConversationItemContentItemType(enum)[], + Text(string)[], + Audio(string)[], + Transcript(string)[], + CallId(string)[], + Name(string)[], + Arguments(string)[], + Output(string)[], + RealtimeClientEventConversationItemDelete(class)[], + EventId(string)[], + RealtimeClientEventConversationItemDeleteType(enum)[], + ItemId(string)[], + RealtimeClientEventConversationItemTruncate(class)[], + EventId(string)[], + RealtimeClientEventConversationItemTruncateType(enum)[], + ItemId(string)[], + ContentIndex(int)[], + AudioEndMs(int)[], + RealtimeClientEventInputAudioBufferAppend(class)[], + EventId(string)[], + RealtimeClientEventInputAudioBufferAppendType(enum)[], + Audio(string)[], + RealtimeClientEventInputAudioBufferClear(class)[], + EventId(string)[], + RealtimeClientEventInputAudioBufferClearType(enum)[], + RealtimeClientEventInputAudioBufferCommit(class)[], + EventId(string)[], + RealtimeClientEventInputAudioBufferCommitType(enum)[], + RealtimeClientEventResponseCancel(class)[], + EventId(string)[], + RealtimeClientEventResponseCancelType(enum)[], + RealtimeClientEventResponseCreate(class)[], + EventId(string)[], + RealtimeClientEventResponseCreateType(enum)[], + RealtimeSession(ref)[], + RealtimeSession(class)[], + RealtimeSessionModalities(class)[], + RealtimeSessionModalitie(enum)[], + Instructions(string)[], + RealtimeSessionVoice(enum)[], + InputAudioFormat(string)[], 
+ OutputAudioFormat(string)[], + RealtimeSessionInputAudioTranscription(class)[], + Model(string)[], + RealtimeSessionTurnDetection(class)[], + Type(string)[], + Threshold(double)[], + PrefixPaddingMs(int)[], + SilenceDurationMs(int)[], + Tools(array)[], + RealtimeSessionTool(class)[], + RealtimeSessionToolType(enum)[], + Name(string)[], + Description(string)[], + RealtimeSessionToolParameters(class)[], + ToolChoice(string)[], + Temperature(double)[], + MaxResponseOutputTokens(oneOf)[], + MaxResponseOutputTokensVariant1(int)[], + RealtimeSessionMaxResponseOutputTokens(enum)[], + RealtimeClientEventSessionUpdate(class)[], + EventId(string)[], + RealtimeClientEventSessionUpdateType(enum)[], + RealtimeSession(ref)[], + RealtimeResponse(class)[], + Id(string)[], + RealtimeResponseObject(enum)[], + RealtimeResponseStatus(enum)[], + RealtimeResponseStatusDetails(class)[], + RealtimeResponseStatusDetailsType(enum)[], + RealtimeResponseStatusDetailsReason(enum)[], + RealtimeResponseStatusDetailsError(class)[], + Type(string)[], + Code(string)[], + Output(array)[], + RealtimeConversationItem(ref)[], + RealtimeResponseUsage(class)[], + TotalTokens(int)[], + InputTokens(int)[], + OutputTokens(int)[], + RealtimeResponseUsageInputTokenDetails(class)[], + CachedTokens(int)[], + TextTokens(int)[], + AudioTokens(int)[], + RealtimeResponseUsageOutputTokenDetails(class)[], + TextTokens(int)[], + AudioTokens(int)[], + RealtimeServerEventConversationCreated(class)[], + EventId(string)[], + RealtimeServerEventConversationCreatedType(enum)[], + RealtimeServerEventConversationCreatedConversation(class)[], + Id(string)[], + Object(string)[], + RealtimeServerEventConversationItemCreated(class)[], + EventId(string)[], + RealtimeServerEventConversationItemCreatedType(enum)[], + PreviousItemId(string)[], + RealtimeConversationItem(ref)[], + RealtimeServerEventConversationItemDeleted(class)[], + EventId(string)[], + RealtimeServerEventConversationItemDeletedType(enum)[], + ItemId(string)[], + RealtimeServerEventConversationItemInputAudioTranscriptionCompleted(class)[], + EventId(string)[], + RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType(enum)[], + ItemId(string)[], + ContentIndex(int)[], + Transcript(string)[], + RealtimeServerEventConversationItemInputAudioTranscriptionFailed(class)[], + EventId(string)[], + RealtimeServerEventConversationItemInputAudioTranscriptionFailedType(enum)[], + ItemId(string)[], + ContentIndex(int)[], + RealtimeServerEventConversationItemInputAudioTranscriptionFailedError(class)[], + Type(string)[], + Code(string)[], + Message(string)[], + Param(string)[], + RealtimeServerEventConversationItemTruncated(class)[], + EventId(string)[], + RealtimeServerEventConversationItemTruncatedType(enum)[], + ItemId(string)[], + ContentIndex(int)[], + AudioEndMs(int)[], + RealtimeServerEventError(class)[], + EventId(string)[], + RealtimeServerEventErrorType(enum)[], + RealtimeServerEventErrorError(class)[], + Type(string)[], + Code(string)[], + Message(string)[], + Param(string)[], + EventId(string)[], + RealtimeServerEventInputAudioBufferCleared(class)[], + EventId(string)[], + RealtimeServerEventInputAudioBufferClearedType(enum)[], + RealtimeServerEventInputAudioBufferCommitted(class)[], + EventId(string)[], + RealtimeServerEventInputAudioBufferCommittedType(enum)[], + PreviousItemId(string)[], + ItemId(string)[], + RealtimeServerEventInputAudioBufferSpeechStarted(class)[], + EventId(string)[], + RealtimeServerEventInputAudioBufferSpeechStartedType(enum)[], + AudioStartMs(int)[], 
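
The RealtimeClientEvent* / RealtimeSession entries above describe JSON events exchanged over the Realtime WebSocket. Below is a hedged sketch of a session.update client event built from the RealtimeSession fields just listed; voice, formats, and thresholds are example values assumed from the REST/WebSocket documentation.

import json

# Illustrative "session.update" client event (RealtimeClientEventSessionUpdate
# wrapping RealtimeSession). The event would be sent as a text frame over the
# Realtime WebSocket connection.
session_update_event = {
    "type": "session.update",                     # RealtimeClientEventSessionUpdateType
    "event_id": "event_123",                      # EventId(string), client-chosen id (placeholder)
    "session": {                                  # RealtimeSession
        "modalities": ["text", "audio"],          # RealtimeSessionModalities
        "instructions": "Answer briefly.",        # Instructions(string)
        "voice": "alloy",                         # RealtimeSessionVoice(enum)
        "input_audio_format": "pcm16",            # InputAudioFormat(string)
        "output_audio_format": "pcm16",           # OutputAudioFormat(string)
        "input_audio_transcription": {"model": "whisper-1"},   # RealtimeSessionInputAudioTranscription
        "turn_detection": {                       # RealtimeSessionTurnDetection
            "type": "server_vad",
            "threshold": 0.5,                     # Threshold(double)
            "prefix_padding_ms": 300,             # PrefixPaddingMs(int)
            "silence_duration_ms": 500,           # SilenceDurationMs(int)
        },
        "temperature": 0.8,                       # Temperature(double)
        "max_response_output_tokens": "inf",      # oneOf: int or "inf"
    },
}

print(json.dumps(session_update_event))           # frame sent over the WebSocket
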
+ ItemId(string)[], + RealtimeServerEventInputAudioBufferSpeechStopped(class)[], + EventId(string)[], + RealtimeServerEventInputAudioBufferSpeechStoppedType(enum)[], + AudioEndMs(int)[], + ItemId(string)[], + RealtimeServerEventRateLimitsUpdated(class)[], + EventId(string)[], + RealtimeServerEventRateLimitsUpdatedType(enum)[], + RateLimits(array)[], + RealtimeServerEventRateLimitsUpdatedRateLimit(class)[], + Name(string)[], + Limit(int)[], + Remaining(int)[], + ResetSeconds(double)[], + RealtimeServerEventResponseAudioDelta(class)[], + EventId(string)[], + RealtimeServerEventResponseAudioDeltaType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + ContentIndex(int)[], + Delta(string)[], + RealtimeServerEventResponseAudioDone(class)[], + EventId(string)[], + RealtimeServerEventResponseAudioDoneType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + ContentIndex(int)[], + RealtimeServerEventResponseAudioTranscriptDelta(class)[], + EventId(string)[], + RealtimeServerEventResponseAudioTranscriptDeltaType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + ContentIndex(int)[], + Delta(string)[], + RealtimeServerEventResponseAudioTranscriptDone(class)[], + EventId(string)[], + RealtimeServerEventResponseAudioTranscriptDoneType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + ContentIndex(int)[], + Transcript(string)[], + RealtimeServerEventResponseContentPartAdded(class)[], + EventId(string)[], + RealtimeServerEventResponseContentPartAddedType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + ContentIndex(int)[], + RealtimeServerEventResponseContentPartAddedPart(class)[], + RealtimeServerEventResponseContentPartAddedPartType(enum)[], + Text(string)[], + Audio(string)[], + Transcript(string)[], + RealtimeServerEventResponseContentPartDone(class)[], + EventId(string)[], + RealtimeServerEventResponseContentPartDoneType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + ContentIndex(int)[], + RealtimeServerEventResponseContentPartDonePart(class)[], + Type(string)[], + Text(string)[], + Audio(string)[], + Transcript(string)[], + RealtimeServerEventResponseCreated(class)[], + EventId(string)[], + RealtimeServerEventResponseCreatedType(enum)[], + RealtimeResponse(ref)[], + RealtimeServerEventResponseDone(class)[], + EventId(string)[], + RealtimeServerEventResponseDoneType(enum)[], + RealtimeResponse(ref)[], + RealtimeServerEventResponseFunctionCallArgumentsDelta(class)[], + EventId(string)[], + RealtimeServerEventResponseFunctionCallArgumentsDeltaType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + CallId(string)[], + Delta(string)[], + RealtimeServerEventResponseFunctionCallArgumentsDone(class)[], + EventId(string)[], + RealtimeServerEventResponseFunctionCallArgumentsDoneType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + CallId(string)[], + Arguments(string)[], + RealtimeServerEventResponseOutputItemAdded(class)[], + EventId(string)[], + RealtimeServerEventResponseOutputItemAddedType(enum)[], + ResponseId(string)[], + OutputIndex(int)[], + RealtimeConversationItem(ref)[], + RealtimeServerEventResponseOutputItemDone(class)[], + EventId(string)[], + RealtimeServerEventResponseOutputItemDoneType(enum)[], + ResponseId(string)[], + OutputIndex(int)[], + RealtimeConversationItem(ref)[], + RealtimeServerEventResponseTextDelta(class)[], + EventId(string)[], + 
RealtimeServerEventResponseTextDeltaType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + ContentIndex(int)[], + Delta(string)[], + RealtimeServerEventResponseTextDone(class)[], + EventId(string)[], + RealtimeServerEventResponseTextDoneType(enum)[], + ResponseId(string)[], + ItemId(string)[], + OutputIndex(int)[], + ContentIndex(int)[], + Text(string)[], + RealtimeServerEventSessionCreated(class)[], + EventId(string)[], + RealtimeServerEventSessionCreatedType(enum)[], + RealtimeSession(ref)[], + RealtimeServerEventSessionUpdated(class)[], + EventId(string)[], + RealtimeServerEventSessionUpdatedType(enum)[], + RealtimeSession(ref)[], + SubmitToolOutputsRunRequest(class)[Assistants], + ToolOutputs(array)[Assistants], + SubmitToolOutputsRunRequestToolOutput(class)[Assistants], + ToolCallId(string)[Assistants], + Output(string)[Assistants], + Stream(bool)[Assistants], + UpdateVectorStoreRequest(class)[Vector stores], + Name(string)[Vector stores], + VectorStoreExpirationAfter(ref)[Vector stores], + UpdateVectorStoreRequestMetadata(class)[Vector stores], + Upload(class)[Uploads], + Id(string)[Uploads], + CreatedAt(int)[Uploads], + Filename(string)[Uploads], + Bytes(int)[Uploads], + Purpose(string)[Uploads], + UploadStatus(enum)[Uploads], + ExpiresAt(int)[Uploads], + UploadObject(enum)[Uploads], + OpenAIFile(ref)[Uploads], + UploadPart(class)[Uploads], + Id(string)[Uploads], + CreatedAt(int)[Uploads], + UploadId(string)[Uploads], + UploadPartObject(enum)[Uploads], + UsageAudioSpeechesResult(class)[Usage], + UsageAudioSpeechesResultObject(enum)[Usage], + Characters(int)[Usage], + NumModelRequests(int)[Usage], + ProjectId(string)[Usage], + UserId(string)[Usage], + ApiKeyId(string)[Usage], + Model(string)[Usage], + UsageAudioTranscriptionsResult(class)[Usage], + UsageAudioTranscriptionsResultObject(enum)[Usage], + Seconds(int)[Usage], + NumModelRequests(int)[Usage], + ProjectId(string)[Usage], + UserId(string)[Usage], + ApiKeyId(string)[Usage], + Model(string)[Usage], + UsageCodeInterpreterSessionsResult(class)[Usage], + UsageCodeInterpreterSessionsResultObject(enum)[Usage], + Sessions(int)[Usage], + ProjectId(string)[Usage], + UsageCompletionsResult(class)[Usage], + UsageCompletionsResultObject(enum)[Usage], + InputTokens(int)[Usage], + InputCachedTokens(int)[Usage], + OutputTokens(int)[Usage], + NumModelRequests(int)[Usage], + ProjectId(string)[Usage], + UserId(string)[Usage], + ApiKeyId(string)[Usage], + Model(string)[Usage], + Batch(bool)[Usage], + UsageEmbeddingsResult(class)[Usage], + UsageEmbeddingsResultObject(enum)[Usage], + InputTokens(int)[Usage], + NumModelRequests(int)[Usage], + ProjectId(string)[Usage], + UserId(string)[Usage], + ApiKeyId(string)[Usage], + Model(string)[Usage], + UsageImagesResult(class)[Usage], + UsageImagesResultObject(enum)[Usage], + Images(int)[Usage], + NumModelRequests(int)[Usage], + Source(string)[Usage], + Size(string)[Usage], + ProjectId(string)[Usage], + UserId(string)[Usage], + ApiKeyId(string)[Usage], + Model(string)[Usage], + UsageModerationsResult(class)[Usage], + UsageModerationsResultObject(enum)[Usage], + InputTokens(int)[Usage], + NumModelRequests(int)[Usage], + ProjectId(string)[Usage], + UserId(string)[Usage], + ApiKeyId(string)[Usage], + Model(string)[Usage], + UsageResponse(class)[Usage], + UsageResponseObject(enum)[Usage], + Data(array)[Usage], + UsageTimeBucket(ref)[Usage], + HasMore(bool)[Usage], + NextPage(string)[Usage], + UsageTimeBucket(class)[Usage], + UsageTimeBucketObject(enum)[Usage], + 
StartTime(int)[Usage], + EndTime(int)[Usage], + Result(array)[Usage], + ResultItem(oneOf)[Usage], + UsageCompletionsResult(ref)[Usage], + UsageEmbeddingsResult(ref)[Usage], + UsageModerationsResult(ref)[Usage], + UsageImagesResult(ref)[Usage], + UsageAudioSpeechesResult(ref)[Usage], + UsageAudioTranscriptionsResult(ref)[Usage], + UsageVectorStoresResult(ref)[Usage], + UsageCodeInterpreterSessionsResult(ref)[Usage], + CostsResult(ref)[Usage], + UsageTimeBucketResultItemDiscriminator(class)[Usage], + UsageTimeBucketResultItemDiscriminatorObject(enum)[Usage], + UsageVectorStoresResult(class)[Usage], + UsageVectorStoresResultObject(enum)[Usage], + UsageBytes(int)[Usage], + ProjectId(string)[Usage], + User(class)[Users], + UserObject(enum)[Users], + Id(string)[Users], + Name(string)[Users], + Email(string)[Users], + UserRole(enum)[Users], + AddedAt(int)[Users], + UserDeleteResponse(class)[Users], + UserDeleteResponseObject(enum)[Users], + Id(string)[Users], + Deleted(bool)[Users], + UserListResponse(class)[Users], + UserListResponseObject(enum)[Users], + Data(array)[Users], + User(ref)[Users], + FirstId(string)[Users], + LastId(string)[Users], + HasMore(bool)[Users], + UserRoleUpdateRequest(class)[Users], + UserRoleUpdateRequestRole(enum)[Users], + VectorStoreFileBatchObject(class)[Vector stores], + Id(string)[Vector stores], + VectorStoreFileBatchObjectObject(enum)[Vector stores], + CreatedAt(int)[Vector stores], + VectorStoreId(string)[Vector stores], + VectorStoreFileBatchObjectStatus(enum)[Vector stores], + VectorStoreFileBatchObjectFileCounts(class)[Vector stores], + InProgress(int)[Vector stores], + Completed(int)[Vector stores], + Failed(int)[Vector stores], + Cancelled(int)[Vector stores], + Total(int)[Vector stores], + CreateAssistantRequest(ref)[Assistants], + ModifyAssistantRequest(ref)[Assistants], CreateSpeechRequest(ref)[Audio], CreateTranscriptionRequest(ref)[Audio], CreateTranslationRequest(ref)[Audio], - CreateFileRequest(ref)[Files], - CreateUploadRequest(ref)[Uploads], - AddUploadPartRequest(ref)[Uploads], - CompleteUploadRequest(ref)[Uploads], - CreateFineTuningJobRequest(ref)[Fine-tuning], - CreateModerationRequest(ref)[Moderations], - CreateAssistantRequest(ref)[Assistants], - ModifyAssistantRequest(ref)[Assistants], - CreateThreadRequest(ref)[Assistants], - ModifyThreadRequest(ref)[Assistants], - CreateMessageRequest(ref)[Assistants], - ModifyMessageRequest(ref)[Assistants], - CreateThreadAndRunRequest(ref)[Assistants], - CreateRunRequest(ref)[Assistants], - ModifyRunRequest(ref)[Assistants], - SubmitToolOutputsRunRequest(ref)[Assistants], - CreateVectorStoreRequest(ref)[Vector Stores], - UpdateVectorStoreRequest(ref)[Vector Stores], - CreateVectorStoreFileRequest(ref)[Vector Stores], - CreateVectorStoreFileBatchRequest(ref)[Vector Stores], CreateBatchRequest(class)[Batch], InputFileId(string)[Batch], CreateBatchRequestEndpoint(enum)[Batch], CreateBatchRequestCompletionWindow(enum)[Batch], CreateBatchRequestMetadata(class)[Batch], Metadata(string)[Batch], + CreateChatCompletionRequest(ref)[Chat], + CreateCompletionRequest(ref)[Completions], + CreateEmbeddingRequest(ref)[Embeddings], + CreateFileRequest(ref)[Files], + CreateFineTuningJobRequest(ref)[Fine-tuning], + CreateImageEditRequest(ref)[Images], + CreateImageRequest(ref)[Images], + CreateImageVariationRequest(ref)[Images], + CreateModerationRequest(ref)[Moderations], InviteRequest(ref)[Invites], - UserRoleUpdateRequest(ref)[Users], ProjectCreateRequest(ref)[Projects], ProjectUpdateRequest(ref)[Projects], + 
ProjectRateLimitUpdateRequest(ref)[Projects], + ProjectServiceAccountCreateRequest(ref)[Projects], ProjectUserCreateRequest(ref)[Projects], ProjectUserUpdateRequest(ref)[Projects], - ProjectServiceAccountCreateRequest(ref)[Projects], + UserRoleUpdateRequest(ref)[Users], + CreateThreadRequest(ref)[Assistants], + CreateThreadAndRunRequest(ref)[Assistants], + ModifyThreadRequest(ref)[Assistants], + CreateMessageRequest(ref)[Assistants], + ModifyMessageRequest(ref)[Assistants], + CreateRunRequest(ref)[Assistants], + ModifyRunRequest(ref)[Assistants], + SubmitToolOutputsRunRequest(ref)[Assistants], + CreateUploadRequest(ref)[Uploads], + CompleteUploadRequest(ref)[Uploads], + AddUploadPartRequest(ref)[Uploads], + CreateVectorStoreRequest(ref)[Vector stores], + UpdateVectorStoreRequest(ref)[Vector stores], + CreateVectorStoreFileBatchRequest(ref)[Vector stores], + CreateVectorStoreFileRequest(ref)[Vector stores], + ListAssistantsLimit(int)[Assistants], + ListAssistantsOrder(enum)[Assistants], + ListAssistantsAfter(string)[Assistants], + ListAssistantsBefore(string)[Assistants], + GetAssistantAssistantId(string)[Assistants], + ModifyAssistantAssistantId(string)[Assistants], + DeleteAssistantAssistantId(string)[Assistants], + ListBatchesAfter(string)[Batch], + ListBatchesLimit(int)[Batch], + RetrieveBatchBatchId(string)[Batch], + CancelBatchBatchId(string)[Batch], ListFilesPurpose(string)[Files], + ListFilesLimit(int)[Files], + ListFilesOrder(enum)[Files], + ListFilesAfter(string)[Files], DeleteFileFileId(string)[Files], RetrieveFileFileId(string)[Files], DownloadFileFileId(string)[Files], - AddUploadPartUploadId(string)[Uploads], - CompleteUploadUploadId(string)[Uploads], - CancelUploadUploadId(string)[Uploads], ListPaginatedFineTuningJobsAfter(string)[Fine-tuning], ListPaginatedFineTuningJobsLimit(int)[Fine-tuning], RetrieveFineTuningJobFineTuningJobId(string)[Fine-tuning], - ListFineTuningEventsFineTuningJobId(string)[Fine-tuning], - ListFineTuningEventsAfter(string)[Fine-tuning], - ListFineTuningEventsLimit(int)[Fine-tuning], CancelFineTuningJobFineTuningJobId(string)[Fine-tuning], ListFineTuningJobCheckpointsFineTuningJobId(string)[Fine-tuning], ListFineTuningJobCheckpointsAfter(string)[Fine-tuning], ListFineTuningJobCheckpointsLimit(int)[Fine-tuning], + ListFineTuningEventsFineTuningJobId(string)[Fine-tuning], + ListFineTuningEventsAfter(string)[Fine-tuning], + ListFineTuningEventsLimit(int)[Fine-tuning], RetrieveModelModel(string)[Models], DeleteModelModel(string)[Models], - ListAssistantsLimit(int)[Assistants], - ListAssistantsOrder(enum)[Assistants], - ListAssistantsAfter(string)[Assistants], - ListAssistantsBefore(string)[Assistants], - GetAssistantAssistantId(string)[Assistants], - ModifyAssistantAssistantId(string)[Assistants], - DeleteAssistantAssistantId(string)[Assistants], + ListAuditLogsEffectiveAt(class)[Audit Logs], + Gt(int)[Audit Logs], + Gte(int)[Audit Logs], + Lt(int)[Audit Logs], + Lte(int)[Audit Logs], + ListAuditLogsProjectIds(array)[Audit Logs], + ListAuditLogsProjectIdsItem(string)[Audit Logs], + ListAuditLogsEventTypes(array)[Audit Logs], + AuditLogEventType(ref)[Audit Logs], + ListAuditLogsActorIds(array)[Audit Logs], + ListAuditLogsActorIdsItem(string)[Audit Logs], + ListAuditLogsActorEmails(array)[Audit Logs], + ListAuditLogsActorEmailsItem(string)[Audit Logs], + ListAuditLogsResourceIds(array)[Audit Logs], + ListAuditLogsResourceIdsItem(string)[Audit Logs], + ListAuditLogsLimit(int)[Audit Logs], + ListAuditLogsAfter(string)[Audit Logs], + 
ListAuditLogsBefore(string)[Audit Logs], + UsageCostsStartTime(int)[Usage], + UsageCostsEndTime(int)[Usage], + UsageCostsBucketWidth(enum)[Usage], + UsageCostsProjectIds(array)[Usage], + UsageCostsProjectIdsItem(string)[Usage], + UsageCostsGroupBy(array)[Usage], + UsageCostsGroupByItem(enum)[Usage], + UsageCostsLimit(int)[Usage], + UsageCostsPage(string)[Usage], + ListInvitesLimit(int)[Invites], + ListInvitesAfter(string)[Invites], + RetrieveInviteInviteId(string)[Invites], + DeleteInviteInviteId(string)[Invites], + ListProjectsLimit(int)[Projects], + ListProjectsAfter(string)[Projects], + ListProjectsIncludeArchived(bool)[Projects], + RetrieveProjectProjectId(string)[Projects], + ModifyProjectProjectId(string)[Projects], + ListProjectApiKeysProjectId(string)[Projects], + ListProjectApiKeysLimit(int)[Projects], + ListProjectApiKeysAfter(string)[Projects], + RetrieveProjectApiKeyProjectId(string)[Projects], + RetrieveProjectApiKeyKeyId(string)[Projects], + DeleteProjectApiKeyProjectId(string)[Projects], + DeleteProjectApiKeyKeyId(string)[Projects], + ArchiveProjectProjectId(string)[Projects], + ListProjectRateLimitsProjectId(string)[Projects], + ListProjectRateLimitsLimit(int)[Projects], + ListProjectRateLimitsAfter(string)[Projects], + ListProjectRateLimitsBefore(string)[Projects], + UpdateProjectRateLimitsProjectId(string)[Projects], + UpdateProjectRateLimitsRateLimitId(string)[Projects], + ListProjectServiceAccountsProjectId(string)[Projects], + ListProjectServiceAccountsLimit(int)[Projects], + ListProjectServiceAccountsAfter(string)[Projects], + CreateProjectServiceAccountProjectId(string)[Projects], + RetrieveProjectServiceAccountProjectId(string)[Projects], + RetrieveProjectServiceAccountServiceAccountId(string)[Projects], + DeleteProjectServiceAccountProjectId(string)[Projects], + DeleteProjectServiceAccountServiceAccountId(string)[Projects], + ListProjectUsersProjectId(string)[Projects], + ListProjectUsersLimit(int)[Projects], + ListProjectUsersAfter(string)[Projects], + CreateProjectUserProjectId(string)[Projects], + RetrieveProjectUserProjectId(string)[Projects], + RetrieveProjectUserUserId(string)[Projects], + ModifyProjectUserProjectId(string)[Projects], + ModifyProjectUserUserId(string)[Projects], + DeleteProjectUserProjectId(string)[Projects], + DeleteProjectUserUserId(string)[Projects], + UsageAudioSpeechesStartTime(int)[Usage], + UsageAudioSpeechesEndTime(int)[Usage], + UsageAudioSpeechesBucketWidth(enum)[Usage], + UsageAudioSpeechesProjectIds(array)[Usage], + UsageAudioSpeechesProjectIdsItem(string)[Usage], + UsageAudioSpeechesUserIds(array)[Usage], + UsageAudioSpeechesUserIdsItem(string)[Usage], + UsageAudioSpeechesApiKeyIds(array)[Usage], + UsageAudioSpeechesApiKeyIdsItem(string)[Usage], + UsageAudioSpeechesModels(array)[Usage], + UsageAudioSpeechesModelsItem(string)[Usage], + UsageAudioSpeechesGroupBy(array)[Usage], + UsageAudioSpeechesGroupByItem(enum)[Usage], + UsageAudioSpeechesLimit(int)[Usage], + UsageAudioSpeechesPage(string)[Usage], + UsageAudioTranscriptionsStartTime(int)[Usage], + UsageAudioTranscriptionsEndTime(int)[Usage], + UsageAudioTranscriptionsBucketWidth(enum)[Usage], + UsageAudioTranscriptionsProjectIds(array)[Usage], + UsageAudioTranscriptionsProjectIdsItem(string)[Usage], + UsageAudioTranscriptionsUserIds(array)[Usage], + UsageAudioTranscriptionsUserIdsItem(string)[Usage], + UsageAudioTranscriptionsApiKeyIds(array)[Usage], + UsageAudioTranscriptionsApiKeyIdsItem(string)[Usage], + UsageAudioTranscriptionsModels(array)[Usage], + 
UsageAudioTranscriptionsModelsItem(string)[Usage], + UsageAudioTranscriptionsGroupBy(array)[Usage], + UsageAudioTranscriptionsGroupByItem(enum)[Usage], + UsageAudioTranscriptionsLimit(int)[Usage], + UsageAudioTranscriptionsPage(string)[Usage], + UsageCodeInterpreterSessionsStartTime(int)[Usage], + UsageCodeInterpreterSessionsEndTime(int)[Usage], + UsageCodeInterpreterSessionsBucketWidth(enum)[Usage], + UsageCodeInterpreterSessionsProjectIds(array)[Usage], + UsageCodeInterpreterSessionsProjectIdsItem(string)[Usage], + UsageCodeInterpreterSessionsGroupBy(array)[Usage], + UsageCodeInterpreterSessionsGroupByItem(enum)[Usage], + UsageCodeInterpreterSessionsLimit(int)[Usage], + UsageCodeInterpreterSessionsPage(string)[Usage], + UsageCompletionsStartTime(int)[Usage], + UsageCompletionsEndTime(int)[Usage], + UsageCompletionsBucketWidth(enum)[Usage], + UsageCompletionsProjectIds(array)[Usage], + UsageCompletionsProjectIdsItem(string)[Usage], + UsageCompletionsUserIds(array)[Usage], + UsageCompletionsUserIdsItem(string)[Usage], + UsageCompletionsApiKeyIds(array)[Usage], + UsageCompletionsApiKeyIdsItem(string)[Usage], + UsageCompletionsModels(array)[Usage], + UsageCompletionsModelsItem(string)[Usage], + UsageCompletionsBatch(bool)[Usage], + UsageCompletionsGroupBy(array)[Usage], + UsageCompletionsGroupByItem(enum)[Usage], + UsageCompletionsLimit(int)[Usage], + UsageCompletionsPage(string)[Usage], + UsageEmbeddingsStartTime(int)[Usage], + UsageEmbeddingsEndTime(int)[Usage], + UsageEmbeddingsBucketWidth(enum)[Usage], + UsageEmbeddingsProjectIds(array)[Usage], + UsageEmbeddingsProjectIdsItem(string)[Usage], + UsageEmbeddingsUserIds(array)[Usage], + UsageEmbeddingsUserIdsItem(string)[Usage], + UsageEmbeddingsApiKeyIds(array)[Usage], + UsageEmbeddingsApiKeyIdsItem(string)[Usage], + UsageEmbeddingsModels(array)[Usage], + UsageEmbeddingsModelsItem(string)[Usage], + UsageEmbeddingsGroupBy(array)[Usage], + UsageEmbeddingsGroupByItem(enum)[Usage], + UsageEmbeddingsLimit(int)[Usage], + UsageEmbeddingsPage(string)[Usage], + UsageImagesStartTime(int)[Usage], + UsageImagesEndTime(int)[Usage], + UsageImagesBucketWidth(enum)[Usage], + UsageImagesSources(array)[Usage], + UsageImagesSource(enum)[Usage], + UsageImagesSizes(array)[Usage], + UsageImagesSize(enum)[Usage], + UsageImagesProjectIds(array)[Usage], + UsageImagesProjectIdsItem(string)[Usage], + UsageImagesUserIds(array)[Usage], + UsageImagesUserIdsItem(string)[Usage], + UsageImagesApiKeyIds(array)[Usage], + UsageImagesApiKeyIdsItem(string)[Usage], + UsageImagesModels(array)[Usage], + UsageImagesModelsItem(string)[Usage], + UsageImagesGroupBy(array)[Usage], + UsageImagesGroupByItem(enum)[Usage], + UsageImagesLimit(int)[Usage], + UsageImagesPage(string)[Usage], + UsageModerationsStartTime(int)[Usage], + UsageModerationsEndTime(int)[Usage], + UsageModerationsBucketWidth(enum)[Usage], + UsageModerationsProjectIds(array)[Usage], + UsageModerationsProjectIdsItem(string)[Usage], + UsageModerationsUserIds(array)[Usage], + UsageModerationsUserIdsItem(string)[Usage], + UsageModerationsApiKeyIds(array)[Usage], + UsageModerationsApiKeyIdsItem(string)[Usage], + UsageModerationsModels(array)[Usage], + UsageModerationsModelsItem(string)[Usage], + UsageModerationsGroupBy(array)[Usage], + UsageModerationsGroupByItem(enum)[Usage], + UsageModerationsLimit(int)[Usage], + UsageModerationsPage(string)[Usage], + UsageVectorStoresStartTime(int)[Usage], + UsageVectorStoresEndTime(int)[Usage], + UsageVectorStoresBucketWidth(enum)[Usage], + UsageVectorStoresProjectIds(array)[Usage], + 
UsageVectorStoresProjectIdsItem(string)[Usage], + UsageVectorStoresGroupBy(array)[Usage], + UsageVectorStoresGroupByItem(enum)[Usage], + UsageVectorStoresLimit(int)[Usage], + UsageVectorStoresPage(string)[Usage], + ListUsersLimit(int)[Users], + ListUsersAfter(string)[Users], + RetrieveUserUserId(string)[Users], + ModifyUserUserId(string)[Users], + DeleteUserUserId(string)[Users], GetThreadThreadId(string)[Assistants], ModifyThreadThreadId(string)[Assistants], DeleteThreadThreadId(string)[Assistants], @@ -1832,12 +2541,12 @@ ListRunsAfter(string)[Assistants], ListRunsBefore(string)[Assistants], CreateRunThreadId(string)[Assistants], + CreateRunInclude(array)[Assistants], + CreateRunIncludeItem(enum)[Assistants], GetRunThreadId(string)[Assistants], GetRunRunId(string)[Assistants], ModifyRunThreadId(string)[Assistants], ModifyRunRunId(string)[Assistants], - SubmitToolOuputsToRunThreadId(string)[Assistants], - SubmitToolOuputsToRunRunId(string)[Assistants], CancelRunThreadId(string)[Assistants], CancelRunRunId(string)[Assistants], ListRunStepsThreadId(string)[Assistants], @@ -1846,103 +2555,53 @@ ListRunStepsOrder(enum)[Assistants], ListRunStepsAfter(string)[Assistants], ListRunStepsBefore(string)[Assistants], + ListRunStepsInclude(array)[Assistants], + ListRunStepsIncludeItem(enum)[Assistants], GetRunStepThreadId(string)[Assistants], GetRunStepRunId(string)[Assistants], GetRunStepStepId(string)[Assistants], - ListVectorStoresLimit(int)[Vector Stores], - ListVectorStoresOrder(enum)[Vector Stores], - ListVectorStoresAfter(string)[Vector Stores], - ListVectorStoresBefore(string)[Vector Stores], - GetVectorStoreVectorStoreId(string)[Vector Stores], - ModifyVectorStoreVectorStoreId(string)[Vector Stores], - DeleteVectorStoreVectorStoreId(string)[Vector Stores], - ListVectorStoreFilesVectorStoreId(string)[Vector Stores], - ListVectorStoreFilesLimit(int)[Vector Stores], - ListVectorStoreFilesOrder(enum)[Vector Stores], - ListVectorStoreFilesAfter(string)[Vector Stores], - ListVectorStoreFilesBefore(string)[Vector Stores], - ListVectorStoreFilesFilter(enum)[Vector Stores], - CreateVectorStoreFileVectorStoreId(string)[Vector Stores], - GetVectorStoreFileVectorStoreId(string)[Vector Stores], - GetVectorStoreFileFileId(string)[Vector Stores], - DeleteVectorStoreFileVectorStoreId(string)[Vector Stores], - DeleteVectorStoreFileFileId(string)[Vector Stores], - CreateVectorStoreFileBatchVectorStoreId(string)[Vector Stores], - GetVectorStoreFileBatchVectorStoreId(string)[Vector Stores], - GetVectorStoreFileBatchBatchId(string)[Vector Stores], - CancelVectorStoreFileBatchVectorStoreId(string)[Vector Stores], - CancelVectorStoreFileBatchBatchId(string)[Vector Stores], - ListFilesInVectorStoreBatchVectorStoreId(string)[Vector Stores], - ListFilesInVectorStoreBatchBatchId(string)[Vector Stores], - ListFilesInVectorStoreBatchLimit(int)[Vector Stores], - ListFilesInVectorStoreBatchOrder(enum)[Vector Stores], - ListFilesInVectorStoreBatchAfter(string)[Vector Stores], - ListFilesInVectorStoreBatchBefore(string)[Vector Stores], - ListFilesInVectorStoreBatchFilter(enum)[Vector Stores], - ListBatchesAfter(string)[Batch], - ListBatchesLimit(int)[Batch], - RetrieveBatchBatchId(string)[Batch], - CancelBatchBatchId(string)[Batch], - ListAuditLogsEffectiveAt(class)[Audit Logs], - Gt(int)[Audit Logs], - Gte(int)[Audit Logs], - Lt(int)[Audit Logs], - Lte(int)[Audit Logs], - ListAuditLogsProjectIds(array)[Audit Logs], - ListAuditLogsProjectIdsItem(string)[Audit Logs], - ListAuditLogsEventTypes(array)[Audit Logs], - 
AuditLogEventType(ref)[Audit Logs], - ListAuditLogsActorIds(array)[Audit Logs], - ListAuditLogsActorIdsItem(string)[Audit Logs], - ListAuditLogsActorEmails(array)[Audit Logs], - ListAuditLogsActorEmailsItem(string)[Audit Logs], - ListAuditLogsResourceIds(array)[Audit Logs], - ListAuditLogsResourceIdsItem(string)[Audit Logs], - ListAuditLogsLimit(int)[Audit Logs], - ListAuditLogsAfter(string)[Audit Logs], - ListAuditLogsBefore(string)[Audit Logs], - ListInvitesLimit(int)[Invites], - ListInvitesAfter(string)[Invites], - RetrieveInviteInviteId(string)[Invites], - DeleteInviteInviteId(string)[Invites], - ListUsersLimit(int)[Users], - ListUsersAfter(string)[Users], - RetrieveUserUserId(string)[Users], - DeleteUserUserId(string)[Users], - ListProjectsLimit(int)[Projects], - ListProjectsAfter(string)[Projects], - ListProjectsIncludeArchived(bool)[Projects], - RetrieveProjectProjectId(string)[Projects], - ArchiveProjectProjectId(string)[Projects], - ListProjectUsersProjectId(string)[Projects], - ListProjectUsersLimit(int)[Projects], - ListProjectUsersAfter(string)[Projects], - CreateProjectUserProjectId(string)[Projects], - RetrieveProjectUserProjectId(string)[Projects], - RetrieveProjectUserUserId(string)[Projects], - DeleteProjectUserProjectId(string)[Projects], - DeleteProjectUserUserId(string)[Projects], - ListProjectServiceAccountsProjectId(string)[Projects], - ListProjectServiceAccountsLimit(int)[Projects], - ListProjectServiceAccountsAfter(string)[Projects], - CreateProjectServiceAccountProjectId(string)[Projects], - RetrieveProjectServiceAccountProjectId(string)[Projects], - RetrieveProjectServiceAccountServiceAccountId(string)[Projects], - DeleteProjectServiceAccountProjectId(string)[Projects], - DeleteProjectServiceAccountServiceAccountId(string)[Projects], - ListProjectApiKeysProjectId(string)[Projects], - ListProjectApiKeysLimit(int)[Projects], - ListProjectApiKeysAfter(string)[Projects], - RetrieveProjectApiKeyProjectId(string)[Projects], - RetrieveProjectApiKeyKeyId(string)[Projects], - DeleteProjectApiKeyProjectId(string)[Projects], - DeleteProjectApiKeyKeyId(string)[Projects], - CreateChatCompletionResponse(ref)[Chat], - CreateCompletionResponse(ref)[Completions], - ImagesResponse(ref)[Images], - ImagesResponse(ref)[Images], - ImagesResponse(ref)[Images], - CreateEmbeddingResponse(ref)[Embeddings], + GetRunStepInclude(array)[Assistants], + GetRunStepIncludeItem(enum)[Assistants], + SubmitToolOuputsToRunThreadId(string)[Assistants], + SubmitToolOuputsToRunRunId(string)[Assistants], + CancelUploadUploadId(string)[Uploads], + CompleteUploadUploadId(string)[Uploads], + AddUploadPartUploadId(string)[Uploads], + ListVectorStoresLimit(int)[Vector stores], + ListVectorStoresOrder(enum)[Vector stores], + ListVectorStoresAfter(string)[Vector stores], + ListVectorStoresBefore(string)[Vector stores], + GetVectorStoreVectorStoreId(string)[Vector stores], + ModifyVectorStoreVectorStoreId(string)[Vector stores], + DeleteVectorStoreVectorStoreId(string)[Vector stores], + CreateVectorStoreFileBatchVectorStoreId(string)[Vector stores], + GetVectorStoreFileBatchVectorStoreId(string)[Vector stores], + GetVectorStoreFileBatchBatchId(string)[Vector stores], + CancelVectorStoreFileBatchVectorStoreId(string)[Vector stores], + CancelVectorStoreFileBatchBatchId(string)[Vector stores], + ListFilesInVectorStoreBatchVectorStoreId(string)[Vector stores], + ListFilesInVectorStoreBatchBatchId(string)[Vector stores], + ListFilesInVectorStoreBatchLimit(int)[Vector stores], + 
ListFilesInVectorStoreBatchOrder(enum)[Vector stores], + ListFilesInVectorStoreBatchAfter(string)[Vector stores], + ListFilesInVectorStoreBatchBefore(string)[Vector stores], + ListFilesInVectorStoreBatchFilter(enum)[Vector stores], + ListVectorStoreFilesVectorStoreId(string)[Vector stores], + ListVectorStoreFilesLimit(int)[Vector stores], + ListVectorStoreFilesOrder(enum)[Vector stores], + ListVectorStoreFilesAfter(string)[Vector stores], + ListVectorStoreFilesBefore(string)[Vector stores], + ListVectorStoreFilesFilter(enum)[Vector stores], + CreateVectorStoreFileVectorStoreId(string)[Vector stores], + GetVectorStoreFileVectorStoreId(string)[Vector stores], + GetVectorStoreFileFileId(string)[Vector stores], + DeleteVectorStoreFileVectorStoreId(string)[Vector stores], + DeleteVectorStoreFileFileId(string)[Vector stores], + ListAssistantsResponse(ref)[Assistants], + AssistantObject(ref)[Assistants], + AssistantObject(ref)[Assistants], + AssistantObject(ref)[Assistants], + DeleteAssistantResponse(ref)[Assistants], CreateSpeechResponse(byte[])[Audio], CreateTranscriptionResponse(oneOf)[Audio], CreateTranscriptionResponseJson(ref)[Audio], @@ -1950,80 +2609,56 @@ CreateTranslationResponse(oneOf)[Audio], CreateTranslationResponseJson(ref)[Audio], CreateTranslationResponseVerboseJson(ref)[Audio], + Batch(ref)[Batch], + ListBatchesResponse(ref)[Batch], + Batch(ref)[Batch], + Batch(ref)[Batch], + CreateChatCompletionResponse(ref)[Chat], + CreateCompletionResponse(ref)[Completions], + CreateEmbeddingResponse(ref)[Embeddings], ListFilesResponse(ref)[Files], OpenAIFile(ref)[Files], DeleteFileResponse(ref)[Files], OpenAIFile(ref)[Files], DownloadFileResponse(string)[Files], - Upload(ref)[Uploads], - UploadPart(ref)[Uploads], - Upload(ref)[Uploads], - Upload(ref)[Uploads], FineTuningJob(ref)[Fine-tuning], ListPaginatedFineTuningJobsResponse(ref)[Fine-tuning], FineTuningJob(ref)[Fine-tuning], - ListFineTuningJobEventsResponse(ref)[Fine-tuning], FineTuningJob(ref)[Fine-tuning], ListFineTuningJobCheckpointsResponse(ref)[Fine-tuning], + ListFineTuningJobEventsResponse(ref)[Fine-tuning], + ImagesResponse(ref)[Images], + ImagesResponse(ref)[Images], + ImagesResponse(ref)[Images], ListModelsResponse(ref)[Models], - Model12(ref)[Models], + Model15(ref)[Models], DeleteModelResponse(ref)[Models], CreateModerationResponse(ref)[Moderations], - ListAssistantsResponse(ref)[Assistants], - AssistantObject(ref)[Assistants], - AssistantObject(ref)[Assistants], - AssistantObject(ref)[Assistants], - DeleteAssistantResponse(ref)[Assistants], - ThreadObject(ref)[Assistants], - ThreadObject(ref)[Assistants], - ThreadObject(ref)[Assistants], - DeleteThreadResponse(ref)[Assistants], - ListMessagesResponse(ref)[Assistants], - MessageObject(ref)[Assistants], - MessageObject(ref)[Assistants], - MessageObject(ref)[Assistants], - DeleteMessageResponse(ref)[Assistants], - RunObject(ref)[Assistants], - ListRunsResponse(ref)[Assistants], - RunObject(ref)[Assistants], - RunObject(ref)[Assistants], - RunObject(ref)[Assistants], - RunObject(ref)[Assistants], - RunObject(ref)[Assistants], - ListRunStepsResponse(ref)[Assistants], - RunStepObject(ref)[Assistants], - ListVectorStoresResponse(ref)[Vector Stores], - VectorStoreObject(ref)[Vector Stores], - VectorStoreObject(ref)[Vector Stores], - VectorStoreObject(ref)[Vector Stores], - DeleteVectorStoreResponse(ref)[Vector Stores], - ListVectorStoreFilesResponse(ref)[Vector Stores], - VectorStoreFileObject(ref)[Vector Stores], - VectorStoreFileObject(ref)[Vector Stores], - 
DeleteVectorStoreFileResponse(ref)[Vector Stores], - VectorStoreFileBatchObject(ref)[Vector Stores], - VectorStoreFileBatchObject(ref)[Vector Stores], - VectorStoreFileBatchObject(ref)[Vector Stores], - ListVectorStoreFilesResponse(ref)[Vector Stores], - Batch(ref)[Batch], - ListBatchesResponse(ref)[Batch], - Batch(ref)[Batch], - Batch(ref)[Batch], ListAuditLogsResponse(ref)[Audit Logs], + UsageResponse(ref)[Usage], InviteListResponse(ref)[Invites], Invite(ref)[Invites], Invite(ref)[Invites], InviteDeleteResponse(ref)[Invites], - UserListResponse(ref)[Users], - User(ref)[Users], - User(ref)[Users], - UserDeleteResponse(ref)[Users], ProjectListResponse(ref)[Projects], Project(ref)[Projects], Project(ref)[Projects], Project(ref)[Projects], ErrorResponse(ref)[Projects], + ProjectApiKeyListResponse(ref)[Projects], + ProjectApiKey(ref)[Projects], + ProjectApiKeyDeleteResponse(ref)[Projects], + ErrorResponse(ref)[Projects], Project(ref)[Projects], + ProjectRateLimitListResponse(ref)[Projects], + ProjectRateLimit(ref)[Projects], + ErrorResponse(ref)[Projects], + ProjectServiceAccountListResponse(ref)[Projects], + ErrorResponse(ref)[Projects], + ProjectServiceAccountCreateResponse(ref)[Projects], + ErrorResponse(ref)[Projects], + ProjectServiceAccount(ref)[Projects], + ProjectServiceAccountDeleteResponse(ref)[Projects], ProjectUserListResponse(ref)[Projects], ErrorResponse(ref)[Projects], ProjectUser(ref)[Projects], @@ -2033,14 +2668,51 @@ ErrorResponse(ref)[Projects], ProjectUserDeleteResponse(ref)[Projects], ErrorResponse(ref)[Projects], - ProjectServiceAccountListResponse(ref)[Projects], - ErrorResponse(ref)[Projects], - ProjectServiceAccountCreateResponse(ref)[Projects], - ErrorResponse(ref)[Projects], - ProjectServiceAccount(ref)[Projects], - ProjectServiceAccountDeleteResponse(ref)[Projects], - ProjectApiKeyListResponse(ref)[Projects], - ProjectApiKey(ref)[Projects], - ProjectApiKeyDeleteResponse(ref)[Projects], - ErrorResponse(ref)[Projects] + UsageResponse(ref)[Usage], + UsageResponse(ref)[Usage], + UsageResponse(ref)[Usage], + UsageResponse(ref)[Usage], + UsageResponse(ref)[Usage], + UsageResponse(ref)[Usage], + UsageResponse(ref)[Usage], + UsageResponse(ref)[Usage], + UserListResponse(ref)[Users], + User(ref)[Users], + User(ref)[Users], + UserDeleteResponse(ref)[Users], + ThreadObject(ref)[Assistants], + RunObject(ref)[Assistants], + ThreadObject(ref)[Assistants], + ThreadObject(ref)[Assistants], + DeleteThreadResponse(ref)[Assistants], + ListMessagesResponse(ref)[Assistants], + MessageObject(ref)[Assistants], + MessageObject(ref)[Assistants], + MessageObject(ref)[Assistants], + DeleteMessageResponse(ref)[Assistants], + ListRunsResponse(ref)[Assistants], + RunObject(ref)[Assistants], + RunObject(ref)[Assistants], + RunObject(ref)[Assistants], + RunObject(ref)[Assistants], + ListRunStepsResponse(ref)[Assistants], + RunStepObject(ref)[Assistants], + RunObject(ref)[Assistants], + Upload(ref)[Uploads], + Upload(ref)[Uploads], + Upload(ref)[Uploads], + UploadPart(ref)[Uploads], + ListVectorStoresResponse(ref)[Vector stores], + VectorStoreObject(ref)[Vector stores], + VectorStoreObject(ref)[Vector stores], + VectorStoreObject(ref)[Vector stores], + DeleteVectorStoreResponse(ref)[Vector stores], + VectorStoreFileBatchObject(ref)[Vector stores], + VectorStoreFileBatchObject(ref)[Vector stores], + VectorStoreFileBatchObject(ref)[Vector stores], + ListVectorStoreFilesResponse(ref)[Vector stores], + ListVectorStoreFilesResponse(ref)[Vector stores], + VectorStoreFileObject(ref)[Vector stores], + 
VectorStoreFileObject(ref)[Vector stores], + DeleteVectorStoreFileResponse(ref)[Vector stores] ] \ No newline at end of file diff --git a/src/tests/AutoSDK.UnitTests/Snapshots/OpenAi/Schemas/_.verified.txt b/src/tests/AutoSDK.UnitTests/Snapshots/OpenAi/Schemas/_.verified.txt index fef4fc181f..031abe4ab7 100644 --- a/src/tests/AutoSDK.UnitTests/Snapshots/OpenAi/Schemas/_.verified.txt +++ b/src/tests/AutoSDK.UnitTests/Snapshots/OpenAi/Schemas/_.verified.txt @@ -1,85 +1,334 @@ [ - Error(class), - Code(string), - Message(string), - Param(string), - Type(string), - ErrorResponse(class), - Error(ref), - ListModelsResponse(class), - ListModelsResponseObject(enum), - Data(array), - Model12(ref), - DeleteModelResponse(class), + AddUploadPartRequest(class), + Data(byte[]), + AssistantObject(class), Id(string), - Deleted(bool), - Object(string), - CreateCompletionRequest(class), - Model(anyOf), - ModelVariant1(string), - CreateCompletionRequestModel(enum), - Prompt(oneOf), - PromptVariant1(string), - PromptVariant2(array), - PromptVariant2Item(string), - PromptVariant3(array), - PromptVariant3Item(int), - PromptVariant4(array), - PromptVariant4Item(array), - PromptVariant4ItemItem(int), - BestOf(int), - Echo(bool), - FrequencyPenalty(double), - CreateCompletionRequestLogitBias(class), - LogitBias(int), - Logprobs(int), - MaxTokens(int), - N(int), - PresencePenalty(double), - Seed(int), - Stop(oneOf), - StopVariant1(string), - StopVariant2(array), - StopVariant2Item(string), - Stream(bool), - ChatCompletionStreamOptions(ref), - Suffix(string), + AssistantObjectObject(enum), + CreatedAt(int), + Name(string), + Description(string), + Model(string), + Instructions(string), + Tools(array), + ToolsItem(oneOf), + AssistantToolsCode(ref), + AssistantToolsFileSearch(ref), + AssistantToolsFunction(ref), + AssistantObjectToolDiscriminator(class), + AssistantObjectToolDiscriminatorType(enum), + AssistantObjectToolResources(class), + AssistantObjectToolResourcesCodeInterpreter(class), + FileIds(array), + FileIdsItem(string), + AssistantObjectToolResourcesFileSearch(class), + VectorStoreIds(array), + VectorStoreIdsItem(string), + AssistantObjectMetadata(class), Temperature(double), TopP(double), - User(string), - CreateCompletionResponse(class), + AssistantsApiResponseFormatOption(ref), + AssistantStreamEvent(oneOf), + ErrorEvent(ref), + DoneEvent(ref), + AssistantStreamEventVariant3(class), + Enabled(bool), + AssistantStreamEventVariant3Event(enum), + ThreadObject(ref), + AssistantStreamEventVariant4(class), + AssistantStreamEventVariant4Event(enum), + RunObject(ref), + AssistantStreamEventVariant5(class), + AssistantStreamEventVariant5Event(enum), + RunObject(ref), + AssistantStreamEventVariant6(class), + AssistantStreamEventVariant6Event(enum), + RunObject(ref), + AssistantStreamEventVariant7(class), + AssistantStreamEventVariant7Event(enum), + RunObject(ref), + AssistantStreamEventVariant8(class), + AssistantStreamEventVariant8Event(enum), + RunObject(ref), + AssistantStreamEventVariant9(class), + AssistantStreamEventVariant9Event(enum), + RunObject(ref), + AssistantStreamEventVariant10(class), + AssistantStreamEventVariant10Event(enum), + RunObject(ref), + AssistantStreamEventVariant11(class), + AssistantStreamEventVariant11Event(enum), + RunObject(ref), + AssistantStreamEventVariant12(class), + AssistantStreamEventVariant12Event(enum), + RunObject(ref), + AssistantStreamEventVariant13(class), + AssistantStreamEventVariant13Event(enum), + RunObject(ref), + AssistantStreamEventVariant14(class), + 
AssistantStreamEventVariant14Event(enum), + RunStepObject(ref), + AssistantStreamEventVariant15(class), + AssistantStreamEventVariant15Event(enum), + RunStepObject(ref), + AssistantStreamEventVariant16(class), + AssistantStreamEventVariant16Event(enum), + RunStepDeltaObject(ref), + AssistantStreamEventVariant17(class), + AssistantStreamEventVariant17Event(enum), + RunStepObject(ref), + AssistantStreamEventVariant18(class), + AssistantStreamEventVariant18Event(enum), + RunStepObject(ref), + AssistantStreamEventVariant19(class), + AssistantStreamEventVariant19Event(enum), + RunStepObject(ref), + AssistantStreamEventVariant20(class), + AssistantStreamEventVariant20Event(enum), + RunStepObject(ref), + AssistantStreamEventVariant21(class), + AssistantStreamEventVariant21Event(enum), + MessageObject(ref), + AssistantStreamEventVariant22(class), + AssistantStreamEventVariant22Event(enum), + MessageObject(ref), + AssistantStreamEventVariant23(class), + AssistantStreamEventVariant23Event(enum), + MessageDeltaObject(ref), + AssistantStreamEventVariant24(class), + AssistantStreamEventVariant24Event(enum), + MessageObject(ref), + AssistantStreamEventVariant25(class), + AssistantStreamEventVariant25Event(enum), + MessageObject(ref), + AssistantStreamEventDiscriminator(class), + AssistantStreamEventDiscriminatorEvent(enum), + AssistantToolsCode(class), + AssistantToolsCodeType(enum), + AssistantToolsFileSearch(class), + AssistantToolsFileSearchType(enum), + AssistantToolsFileSearchFileSearch(class), + MaxNumResults(int), + FileSearchRankingOptions(ref), + AssistantToolsFileSearchTypeOnly(class), + AssistantToolsFileSearchTypeOnlyType(enum), + AssistantToolsFunction(class), + AssistantToolsFunctionType(enum), + FunctionObject(ref), + AssistantsApiResponseFormatOption(oneOf), + AssistantsApiResponseFormatOptionEnum(enum), + ResponseFormatText(ref), + ResponseFormatJsonObject(ref), + ResponseFormatJsonSchema(ref), + AssistantsApiToolChoiceOption(oneOf), + AssistantsApiToolChoiceOptionEnum(enum), + AssistantsNamedToolChoice(ref), + AssistantsNamedToolChoice(class), + AssistantsNamedToolChoiceType(enum), + AssistantsNamedToolChoiceFunction(class), + Name(string), + AudioResponseFormat(enum), + AuditLog(class), Id(string), - Choices(array), - CreateCompletionResponseChoice(class), - CreateCompletionResponseChoiceFinishReason(enum), - Index(int), - CreateCompletionResponseChoiceLogprobs(class), - TextOffset(array), - TextOffsetItem(int), - TokenLogprobs(array), - TokenLogprobsItem(double), - Tokens(array), - TokensItem(string), - TopLogprobs(array), - CreateCompletionResponseChoiceLogprobsTopLogprob(class), - TopLogprobsItem(double), - Text(string), - Created(int), - Model(string), - SystemFingerprint(string), - CreateCompletionResponseObject(enum), - CompletionUsage(ref), - ChatCompletionRequestMessageContentPartText(class), - ChatCompletionRequestMessageContentPartTextType(enum), - Text(string), - ChatCompletionRequestMessageContentPartImage(class), - ChatCompletionRequestMessageContentPartImageType(enum), - ChatCompletionRequestMessageContentPartImageImageUrl(class), - Url(Uri), - ChatCompletionRequestMessageContentPartImageImageUrlDetail(enum), - ChatCompletionRequestMessageContentPartRefusal(class), - ChatCompletionRequestMessageContentPartRefusalType(enum), + AuditLogEventType(ref), + EffectiveAt(int), + AuditLogProject(class), + Id(string), + Name(string), + AuditLogActor(ref), + AuditLogApiKeyCreated(class), + Id(string), + AuditLogApiKeyCreatedData(class), + Scopes(array), + ScopesItem(string), + 
AuditLogApiKeyUpdated(class), + Id(string), + AuditLogApiKeyUpdatedChangesRequested(class), + Scopes(array), + ScopesItem(string), + AuditLogApiKeyDeleted(class), + Id(string), + AuditLogInviteSent(class), + Id(string), + AuditLogInviteSentData(class), + Email(string), + Role(string), + AuditLogInviteAccepted(class), + Id(string), + AuditLogInviteDeleted(class), + Id(string), + AuditLogLoginFailed(class), + ErrorCode(string), + ErrorMessage(string), + AuditLogLogoutFailed(class), + ErrorCode(string), + ErrorMessage(string), + AuditLogOrganizationUpdated(class), + Id(string), + AuditLogOrganizationUpdatedChangesRequested(class), + Title(string), + Description(string), + Name(string), + AuditLogOrganizationUpdatedChangesRequestedSettings(class), + ThreadsUiVisibility(string), + UsageDashboardVisibility(string), + AuditLogProjectCreated(class), + Id(string), + AuditLogProjectCreatedData(class), + Name(string), + Title(string), + AuditLogProjectUpdated(class), + Id(string), + AuditLogProjectUpdatedChangesRequested(class), + Title(string), + AuditLogProjectArchived(class), + Id(string), + AuditLogRateLimitUpdated(class), + Id(string), + AuditLogRateLimitUpdatedChangesRequested(class), + MaxRequestsPer1Minute(int), + MaxTokensPer1Minute(int), + MaxImagesPer1Minute(int), + MaxAudioMegabytesPer1Minute(int), + MaxRequestsPer1Day(int), + Batch1DayMaxInputTokens(int), + AuditLogRateLimitDeleted(class), + Id(string), + AuditLogServiceAccountCreated(class), + Id(string), + AuditLogServiceAccountCreatedData(class), + Role(string), + AuditLogServiceAccountUpdated(class), + Id(string), + AuditLogServiceAccountUpdatedChangesRequested(class), + Role(string), + AuditLogServiceAccountDeleted(class), + Id(string), + AuditLogUserAdded(class), + Id(string), + AuditLogUserAddedData(class), + Role(string), + AuditLogUserUpdated(class), + Id(string), + AuditLogUserUpdatedChangesRequested(class), + Role(string), + AuditLogUserDeleted(class), + Id(string), + AuditLogActor(class), + AuditLogActorType(enum), + AuditLogActorSession(ref), + AuditLogActorApiKey(ref), + AuditLogActorApiKey(class), + Id(string), + AuditLogActorApiKeyType(enum), + AuditLogActorUser(ref), + AuditLogActorServiceAccount(ref), + AuditLogActorServiceAccount(class), + Id(string), + AuditLogActorSession(class), + AuditLogActorUser(ref), + IpAddress(string), + AuditLogActorUser(class), + Id(string), + Email(string), + AuditLogEventType(enum), + AutoChunkingStrategyRequestParam(class), + AutoChunkingStrategyRequestParamType(enum), + Batch(class), + Id(string), + BatchObject(enum), + Endpoint(string), + BatchErrors(class), + Object(string), + Data(array), + BatchErrorsDataItem(class), + Code(string), + Message(string), + Param(string), + Line(int), + InputFileId(string), + CompletionWindow(string), + BatchStatus(enum), + OutputFileId(string), + ErrorFileId(string), + CreatedAt(int), + InProgressAt(int), + ExpiresAt(int), + FinalizingAt(int), + CompletedAt(int), + FailedAt(int), + ExpiredAt(int), + CancellingAt(int), + CancelledAt(int), + BatchRequestCounts(class), + Total(int), + Completed(int), + Failed(int), + BatchMetadata(class), + BatchRequestInput(class), + CustomId(string), + BatchRequestInputMethod(enum), + Url(string), + BatchRequestOutput(class), + Id(string), + CustomId(string), + BatchRequestOutputResponse(class), + StatusCode(int), + RequestId(string), + BatchRequestOutputResponseBody(class), + BatchRequestOutputError(class), + Code(string), + Message(string), + CancelUploadRequest(class), + ChatCompletionFunctionCallOption(class), + 
Name(string), + ChatCompletionFunctions(class), + Description(string), + Name(string), + FunctionParameters(ref), + ChatCompletionMessageToolCall(class), + Id(string), + ChatCompletionMessageToolCallType(enum), + ChatCompletionMessageToolCallFunction(class), + Name(string), + Arguments(string), + ChatCompletionMessageToolCallChunk(class), + Index(int), + Id(string), + ChatCompletionMessageToolCallChunkType(enum), + ChatCompletionMessageToolCallChunkFunction(class), + Name(string), + Arguments(string), + ChatCompletionMessageToolCalls(array), + ChatCompletionMessageToolCall(ref), + ChatCompletionModalities(array), + ChatCompletionModalitie(enum), + ChatCompletionNamedToolChoice(class), + ChatCompletionNamedToolChoiceType(enum), + ChatCompletionNamedToolChoiceFunction(class), + Name(string), + ChatCompletionRequestAssistantMessage(class), + Content(oneOf), + ContentVariant1(string), + ContentVariant2(array), + ChatCompletionRequestAssistantMessageContentPart(ref), Refusal(string), + ChatCompletionRequestAssistantMessageRole(enum), + Name(string), + ChatCompletionRequestAssistantMessageAudio(class), + Id(string), + ChatCompletionMessageToolCalls(ref), + ChatCompletionRequestAssistantMessageFunctionCall(class), + Arguments(string), + Name(string), + ChatCompletionRequestAssistantMessageContentPart(oneOf), + ChatCompletionRequestMessageContentPartText(ref), + ChatCompletionRequestMessageContentPartRefusal(ref), + ChatCompletionRequestAssistantMessageContentPartDiscriminator(class), + ChatCompletionRequestAssistantMessageContentPartDiscriminatorType(enum), + ChatCompletionRequestFunctionMessage(class), + ChatCompletionRequestFunctionMessageRole(enum), + Content(string), + Name(string), ChatCompletionRequestMessage(oneOf), ChatCompletionRequestSystemMessage(ref), ChatCompletionRequestUserMessage(ref), @@ -88,114 +337,53 @@ ChatCompletionRequestFunctionMessage(ref), ChatCompletionRequestMessageDiscriminator(class), ChatCompletionRequestMessageDiscriminatorRole(enum), - ChatCompletionRequestSystemMessageContentPart(oneOf), - ChatCompletionRequestMessageContentPartText(ref), - ChatCompletionRequestUserMessageContentPart(oneOf), - ChatCompletionRequestMessageContentPartText(ref), - ChatCompletionRequestMessageContentPartImage(ref), - ChatCompletionRequestUserMessageContentPartDiscriminator(class), - ChatCompletionRequestUserMessageContentPartDiscriminatorType(enum), - ChatCompletionRequestAssistantMessageContentPart(oneOf), - ChatCompletionRequestMessageContentPartText(ref), - ChatCompletionRequestMessageContentPartRefusal(ref), - ChatCompletionRequestAssistantMessageContentPartDiscriminator(class), - ChatCompletionRequestAssistantMessageContentPartDiscriminatorType(enum), - ChatCompletionRequestToolMessageContentPart(oneOf), - ChatCompletionRequestMessageContentPartText(ref), + ChatCompletionRequestMessageContentPartAudio(class), + ChatCompletionRequestMessageContentPartAudioType(enum), + ChatCompletionRequestMessageContentPartAudioInputAudio(class), + Data(string), + ChatCompletionRequestMessageContentPartAudioInputAudioFormat(enum), + ChatCompletionRequestMessageContentPartImage(class), + ChatCompletionRequestMessageContentPartImageType(enum), + ChatCompletionRequestMessageContentPartImageImageUrl(class), + Url(Uri), + ChatCompletionRequestMessageContentPartImageImageUrlDetail(enum), + ChatCompletionRequestMessageContentPartRefusal(class), + ChatCompletionRequestMessageContentPartRefusalType(enum), + Refusal(string), + ChatCompletionRequestMessageContentPartText(class), + 
ChatCompletionRequestMessageContentPartTextType(enum), + Text(string), ChatCompletionRequestSystemMessage(class), - Content(oneOf), + Content2(oneOf), ContentVariant1(string), ContentVariant2(array), ChatCompletionRequestSystemMessageContentPart(ref), ChatCompletionRequestSystemMessageRole(enum), Name(string), - ChatCompletionRequestUserMessage(class), - Content2(oneOf), - ContentVariant1(string), - ContentVariant2(array), - ChatCompletionRequestUserMessageContentPart(ref), - ChatCompletionRequestUserMessageRole(enum), - Name(string), - ChatCompletionRequestAssistantMessage(class), - Content3(oneOf), - ContentVariant1(string), - ContentVariant2(array), - ChatCompletionRequestAssistantMessageContentPart(ref), - Refusal(string), - ChatCompletionRequestAssistantMessageRole(enum), - Name(string), - ChatCompletionMessageToolCalls(ref), - ChatCompletionRequestAssistantMessageFunctionCall(class), - Arguments(string), - Name(string), - FineTuneChatCompletionRequestAssistantMessage(allOf), - FineTuneChatCompletionRequestAssistantMessageVariant1(class), - Weight(int), - ChatCompletionRequestAssistantMessage(ref), + ChatCompletionRequestSystemMessageContentPart(oneOf), + ChatCompletionRequestMessageContentPartText(ref), ChatCompletionRequestToolMessage(class), ChatCompletionRequestToolMessageRole(enum), - Content4(oneOf), + Content3(oneOf), ContentVariant1(string), ContentVariant2(array), ChatCompletionRequestToolMessageContentPart(ref), ToolCallId(string), - ChatCompletionRequestFunctionMessage(class), - ChatCompletionRequestFunctionMessageRole(enum), - Content(string), - Name(string), - FunctionParameters(class), - ChatCompletionFunctions(class), - Description(string), - Name(string), - FunctionParameters(ref), - ChatCompletionFunctionCallOption(class), - Name(string), - ChatCompletionTool(class), - ChatCompletionToolType(enum), - FunctionObject(ref), - FunctionObject(class), - Description(string), + ChatCompletionRequestToolMessageContentPart(oneOf), + ChatCompletionRequestMessageContentPartText(ref), + ChatCompletionRequestUserMessage(class), + Content4(oneOf), + ContentVariant1(string), + ContentVariant2(array), + ChatCompletionRequestUserMessageContentPart(ref), + ChatCompletionRequestUserMessageRole(enum), Name(string), - FunctionParameters(ref), - Strict(bool), - ResponseFormatText(class), - ResponseFormatTextType(enum), - ResponseFormatJsonObject(class), - ResponseFormatJsonObjectType(enum), - ResponseFormatJsonSchemaSchema(class), - ResponseFormatJsonSchema(class), - ResponseFormatJsonSchemaType(enum), - ResponseFormatJsonSchemaJsonSchema(class), - Description(string), - Name(string), - ResponseFormatJsonSchemaSchema(ref), - Strict(bool), - ChatCompletionToolChoiceOption(oneOf), - ChatCompletionToolChoiceOptionEnum(enum), - ChatCompletionNamedToolChoice(ref), - ChatCompletionNamedToolChoice(class), - ChatCompletionNamedToolChoiceType(enum), - ChatCompletionNamedToolChoiceFunction(class), - Name(string), - ParallelToolCalls(bool), - ChatCompletionMessageToolCalls(array), - ChatCompletionMessageToolCall(ref), - ChatCompletionMessageToolCall(class), - Id(string), - ChatCompletionMessageToolCallType(enum), - ChatCompletionMessageToolCallFunction(class), - Name(string), - Arguments(string), - ChatCompletionMessageToolCallChunk(class), - Index(int), - Id(string), - ChatCompletionMessageToolCallChunkType(enum), - ChatCompletionMessageToolCallChunkFunction(class), - Name(string), - Arguments(string), - ChatCompletionRole(enum), - ChatCompletionStreamOptions(class), - IncludeUsage(bool), + 
ChatCompletionRequestUserMessageContentPart(oneOf), + ChatCompletionRequestMessageContentPartText(ref), + ChatCompletionRequestMessageContentPartImage(ref), + ChatCompletionRequestMessageContentPartAudio(ref), + ChatCompletionRequestUserMessageContentPartDiscriminator(class), + ChatCompletionRequestUserMessageContentPartDiscriminatorType(enum), ChatCompletionResponseMessage(class), Content(string), Refusal(string), @@ -204,6 +392,14 @@ ChatCompletionResponseMessageFunctionCall(class), Arguments(string), Name(string), + ChatCompletionResponseMessageAudio(class), + Id(string), + ExpiresAt(int), + Data(string), + Transcript(string), + ChatCompletionRole(enum), + ChatCompletionStreamOptions(class), + IncludeUsage(bool), ChatCompletionStreamResponseDelta(class), Content(string), ChatCompletionStreamResponseDeltaFunctionCall(class), @@ -213,19 +409,129 @@ ChatCompletionMessageToolCallChunk(ref), ChatCompletionStreamResponseDeltaRole(enum), Refusal(string), + ChatCompletionTokenLogprob(class), + Token(string), + Logprob(double), + Bytes(array), + BytesItem(int), + TopLogprobs(array), + ChatCompletionTokenLogprobTopLogprob(class), + Token(string), + Logprob(double), + Bytes(array), + BytesItem(int), + ChatCompletionTool(class), + ChatCompletionToolType(enum), + FunctionObject(ref), + ChatCompletionToolChoiceOption(oneOf), + ChatCompletionToolChoiceOptionEnum(enum), + ChatCompletionNamedToolChoice(ref), + ChunkingStrategyRequestParam(class), + AutoChunkingStrategyRequestParam(ref), + StaticChunkingStrategyRequestParam(ref), + ChunkingStrategyRequestParamDiscriminator(class), + ChunkingStrategyRequestParamDiscriminatorType(enum), + CompleteUploadRequest(class), + PartIds(array), + PartIdsItem(string), + Md5(string), + CompletionUsage(class), + CompletionTokens(int), + PromptTokens(int), + TotalTokens(int), + CompletionUsageCompletionTokensDetails(class), + AcceptedPredictionTokens(int), + AudioTokens(int), + ReasoningTokens(int), + RejectedPredictionTokens(int), + CompletionUsagePromptTokensDetails(class), + AudioTokens(int), + CachedTokens(int), + CostsResult(class), + CostsResultObject(enum), + CostsResultAmount(class), + Value(double), + Currency(string), + LineItem(string), + ProjectId(string), + CreateAssistantRequest(class), + Model(anyOf), + ModelVariant1(string), + CreateAssistantRequestModel(enum), + Name(string), + Description(string), + Instructions(string), + Tools(array), + ToolsItem2(oneOf), + AssistantToolsCode(ref), + AssistantToolsFileSearch(ref), + AssistantToolsFunction(ref), + CreateAssistantRequestToolDiscriminator(class), + CreateAssistantRequestToolDiscriminatorType(enum), + CreateAssistantRequestToolResources(class), + CreateAssistantRequestToolResourcesCodeInterpreter(class), + FileIds(array), + FileIdsItem(string), + CreateAssistantRequestToolResourcesFileSearch(class), + VectorStoreIds(array), + VectorStoreIdsItem(string), + VectorStores(array), + CreateAssistantRequestToolResourcesFileSearchVectorStore(class), + FileIds(array), + FileIdsItem(string), + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategy(class), + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1(class), + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type(enum), + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2(class), + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type(enum), + 
CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static(class), + MaxChunkSizeTokens(int), + ChunkOverlapTokens(int), + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator(class), + CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType(enum), + CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata(class), + CreateAssistantRequestToolResourcesFileSearchVariant1(class), + CreateAssistantRequestToolResourcesFileSearchVariant2(class), + CreateAssistantRequestMetadata(class), + Temperature(double), + TopP(double), + AssistantsApiResponseFormatOption(ref), + CreateChatCompletionFunctionResponse(class), + Id(string), + Choices(array), + CreateChatCompletionFunctionResponseChoice(class), + CreateChatCompletionFunctionResponseChoiceFinishReason(enum), + Index(int), + ChatCompletionResponseMessage(ref), + Created(int), + Model(string), + SystemFingerprint(string), + CreateChatCompletionFunctionResponseObject(enum), + CompletionUsage(ref), + CreateChatCompletionImageResponse(class), CreateChatCompletionRequest(class), Messages(array), ChatCompletionRequestMessage(ref), Model2(anyOf), ModelVariant1(string), CreateChatCompletionRequestModel(enum), + Store(bool), + CreateChatCompletionRequestMetadata(class), + Metadata(string), FrequencyPenalty(double), CreateChatCompletionRequestLogitBias(class), LogitBias(int), Logprobs(bool), TopLogprobs(int), MaxTokens(int), + MaxCompletionTokens(int), N(int), + ChatCompletionModalities(ref), + Prediction_AllOf1Wrapped(oneOf), + PredictionContent(ref), + CreateChatCompletionRequestAudio(class), + CreateChatCompletionRequestAudioVoice(enum), + CreateChatCompletionRequestAudioFormat(enum), PresencePenalty(double), ResponseFormat(oneOf), ResponseFormatText(ref), @@ -235,7 +541,7 @@ CreateChatCompletionRequestResponseFormatDiscriminatorType(enum), Seed(int), CreateChatCompletionRequestServiceTier(enum), - Stop2(oneOf), + Stop(oneOf), StopVariant1(string), StopVariant2(array), StopVariant2Item(string), @@ -271,34 +577,6 @@ SystemFingerprint(string), CreateChatCompletionResponseObject(enum), CompletionUsage(ref), - CreateChatCompletionFunctionResponse(class), - Id(string), - Choices(array), - CreateChatCompletionFunctionResponseChoice(class), - CreateChatCompletionFunctionResponseChoiceFinishReason(enum), - Index(int), - ChatCompletionResponseMessage(ref), - Created(int), - Model(string), - SystemFingerprint(string), - CreateChatCompletionFunctionResponseObject(enum), - CompletionUsage(ref), - ChatCompletionTokenLogprob(class), - Token(string), - Logprob(double), - Bytes(array), - BytesItem(int), - TopLogprobs(array), - ChatCompletionTokenLogprobTopLogprob(class), - Token(string), - Logprob(double), - Bytes(array), - BytesItem(int), - ListPaginatedFineTuningJobsResponse(class), - Data(array), - FineTuningJob(ref), - HasMore(bool), - ListPaginatedFineTuningJobsResponseObject(enum), CreateChatCompletionStreamResponse(class), Id(string), Choices(array), @@ -320,52 +598,185 @@ CompletionTokens(int), PromptTokens(int), TotalTokens(int), - CreateChatCompletionImageResponse(class), - CreateImageRequest(class), - Prompt(string), + CreateCompletionRequest(class), Model3(anyOf), ModelVariant1(string), - CreateImageRequestModel(enum), + CreateCompletionRequestModel(enum), + Prompt(oneOf), + PromptVariant1(string), + PromptVariant2(array), + PromptVariant2Item(string), + PromptVariant3(array), + PromptVariant3Item(int), + PromptVariant4(array), + PromptVariant4Item(array), 
+ PromptVariant4ItemItem(int), + BestOf(int), + Echo(bool), + FrequencyPenalty(double), + CreateCompletionRequestLogitBias(class), + LogitBias(int), + Logprobs(int), + MaxTokens(int), N(int), - CreateImageRequestQuality(enum), - CreateImageRequestResponseFormat(enum), - CreateImageRequestSize(enum), - CreateImageRequestStyle(enum), + PresencePenalty(double), + Seed(int), + Stop2(oneOf), + StopVariant1(string), + StopVariant2(array), + StopVariant2Item(string), + Stream(bool), + ChatCompletionStreamOptions(ref), + Suffix(string), + Temperature(double), + TopP(double), User(string), - ImagesResponse(class), + CreateCompletionResponse(class), + Id(string), + Choices(array), + CreateCompletionResponseChoice(class), + CreateCompletionResponseChoiceFinishReason(enum), + Index(int), + CreateCompletionResponseChoiceLogprobs(class), + TextOffset(array), + TextOffsetItem(int), + TokenLogprobs(array), + TokenLogprobsItem(double), + Tokens(array), + TokensItem(string), + TopLogprobs(array), + CreateCompletionResponseChoiceLogprobsTopLogprob(class), + TopLogprobsItem(double), + Text(string), Created(int), + Model(string), + SystemFingerprint(string), + CreateCompletionResponseObject(enum), + CompletionUsage(ref), + CreateEmbeddingRequest(class), + Input(oneOf), + InputVariant1(string), + InputVariant2(array), + InputVariant2Item(string), + InputVariant3(array), + InputVariant3Item(int), + InputVariant4(array), + InputVariant4Item(array), + InputVariant4ItemItem(int), + Model4(anyOf), + ModelVariant1(string), + CreateEmbeddingRequestModel(enum), + CreateEmbeddingRequestEncodingFormat(enum), + Dimensions(int), + User(string), + CreateEmbeddingResponse(class), Data(array), - Image(ref), - Image(class), - B64Json(string), - Url(string), - RevisedPrompt(string), + Embedding(ref), + Model(string), + CreateEmbeddingResponseObject(enum), + CreateEmbeddingResponseUsage(class), + PromptTokens(int), + TotalTokens(int), + CreateFileRequest(class), + File(byte[]), + CreateFileRequestPurpose(enum), + CreateFineTuningJobRequest(class), + Model5(anyOf), + ModelVariant1(string), + CreateFineTuningJobRequestModel(enum), + TrainingFile(string), + CreateFineTuningJobRequestHyperparameters(class), + BatchSize(oneOf), + CreateFineTuningJobRequestHyperparametersBatchSize(enum), + BatchSizeVariant2(int), + LearningRateMultiplier(oneOf), + CreateFineTuningJobRequestHyperparametersLearningRateMultiplier(enum), + LearningRateMultiplierVariant2(double), + NEpochs(oneOf), + CreateFineTuningJobRequestHyperparametersNEpochs(enum), + NEpochsVariant2(int), + Suffix(string), + ValidationFile(string), + Integrations(array), + CreateFineTuningJobRequestIntegration(class), + Type_AllOf1Wrapped(oneOf), + CreateFineTuningJobRequestIntegrationType(enum), + CreateFineTuningJobRequestIntegrationWandb(class), + Project(string), + Name(string), + Entity(string), + Tags(array), + TagsItem(string), + Seed(int), CreateImageEditRequest(class), Image(byte[]), Prompt(string), Mask(byte[]), - Model4(anyOf), + Model6(anyOf), ModelVariant1(string), CreateImageEditRequestModel(enum), N(int), CreateImageEditRequestSize(enum), CreateImageEditRequestResponseFormat(enum), User(string), + CreateImageRequest(class), + Prompt(string), + Model7(anyOf), + ModelVariant1(string), + CreateImageRequestModel(enum), + N(int), + CreateImageRequestQuality(enum), + CreateImageRequestResponseFormat(enum), + CreateImageRequestSize(enum), + CreateImageRequestStyle(enum), + User(string), CreateImageVariationRequest(class), Image(byte[]), - Model5(anyOf), + Model8(anyOf), 
ModelVariant1(string), CreateImageVariationRequestModel(enum), N(int), CreateImageVariationRequestResponseFormat(enum), CreateImageVariationRequestSize(enum), User(string), + CreateMessageRequest(class), + CreateMessageRequestRole(enum), + Content5(oneOf), + ContentVariant1(string), + ContentVariant2(array), + ContentVariant2Item(oneOf), + MessageContentImageFileObject(ref), + MessageContentImageUrlObject(ref), + MessageRequestContentTextObject(ref), + CreateMessageRequestContentVariant2ItemDiscriminator(class), + CreateMessageRequestContentVariant2ItemDiscriminatorType(enum), + Attachments(array), + CreateMessageRequestAttachment(class), + FileId(string), + Tools(array), + ToolsItem3(oneOf), + AssistantToolsCode(ref), + AssistantToolsFileSearchTypeOnly(ref), + CreateMessageRequestAttachmentToolDiscriminator(class), + CreateMessageRequestAttachmentToolDiscriminatorType(enum), + CreateMessageRequestMetadata(class), CreateModerationRequest(class), - Input(oneOf), + Input2(oneOf), InputVariant1(string), InputVariant2(array), InputVariant2Item(string), - Model6(anyOf), + InputVariant3(array), + InputVariant3Item(oneOf), + CreateModerationRequestInputVariant3ItemVariant1(class), + CreateModerationRequestInputVariant3ItemVariant1Type(enum), + CreateModerationRequestInputVariant3ItemVariant1ImageUrl(class), + Url(Uri), + CreateModerationRequestInputVariant3ItemVariant2(class), + CreateModerationRequestInputVariant3ItemVariant2Type(enum), + Text(string), + CreateModerationRequestInputVariant3ItemDiscriminator(class), + CreateModerationRequestInputVariant3ItemDiscriminatorType(enum), + Model9(anyOf), ModelVariant1(string), CreateModerationRequestModel(enum), CreateModerationResponse(class), @@ -379,6 +790,8 @@ HateThreatening(bool), Harassment(bool), HarassmentThreatening(bool), + Illicit(bool), + IllicitViolent(bool), SelfHarm(bool), SelfHarmIntent(bool), SelfHarmInstructions(bool), @@ -391,6 +804,8 @@ HateThreatening(double), Harassment(double), HarassmentThreatening(double), + Illicit(double), + IllicitViolent(double), SelfHarm(double), SelfHarmIntent(double), SelfHarmInstructions(double), @@ -398,121 +813,139 @@ SexualMinors(double), Violence(double), ViolenceGraphic(double), - ListFilesResponse(class), - Data(array), - OpenAIFile(ref), - ListFilesResponseObject(enum), - CreateFileRequest(class), - File(byte[]), - CreateFileRequestPurpose(enum), - DeleteFileResponse(class), - Id(string), - DeleteFileResponseObject(enum), - Deleted(bool), - CreateUploadRequest(class), - Filename(string), - CreateUploadRequestPurpose(enum), - Bytes(int), - MimeType(string), - AddUploadPartRequest(class), - Data(byte[]), - CompleteUploadRequest(class), - PartIds(array), - PartIdsItem(string), - Md5(string), - CancelUploadRequest(class), - CreateFineTuningJobRequest(class), - Model7(anyOf), + CreateModerationResponseResultCategoryAppliedInputTypes(class), + Hate(array), + CreateModerationResponseResultCategoryAppliedInputTypesHateItem(enum), + HateThreatening(array), + CreateModerationResponseResultCategoryAppliedInputTypesHateThreateningItem(enum), + Harassment(array), + CreateModerationResponseResultCategoryAppliedInputTypesHarassmentItem(enum), + HarassmentThreatening(array), + CreateModerationResponseResultCategoryAppliedInputTypesHarassmentThreateningItem(enum), + Illicit(array), + CreateModerationResponseResultCategoryAppliedInputTypesIllicitItem(enum), + IllicitViolent(array), + CreateModerationResponseResultCategoryAppliedInputTypesIllicitViolentItem(enum), + SelfHarm(array), + 
CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmItem(enum), + SelfHarmIntent(array), + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmIntentItem(enum), + SelfHarmInstructions(array), + CreateModerationResponseResultCategoryAppliedInputTypesSelfHarmInstruction(enum), + Sexual(array), + CreateModerationResponseResultCategoryAppliedInputTypesSexualItem(enum), + SexualMinors(array), + CreateModerationResponseResultCategoryAppliedInputTypesSexualMinor(enum), + Violence(array), + CreateModerationResponseResultCategoryAppliedInputTypesViolenceItem(enum), + ViolenceGraphic(array), + CreateModerationResponseResultCategoryAppliedInputTypesViolenceGraphicItem(enum), + CreateRunRequest(class), + AssistantId(string), + Model10(anyOf), ModelVariant1(string), - CreateFineTuningJobRequestModel(enum), - TrainingFile(string), - CreateFineTuningJobRequestHyperparameters(class), - BatchSize(oneOf), - CreateFineTuningJobRequestHyperparametersBatchSize(enum), - BatchSizeVariant2(int), - LearningRateMultiplier(oneOf), - CreateFineTuningJobRequestHyperparametersLearningRateMultiplier(enum), - LearningRateMultiplierVariant2(double), - NEpochs(oneOf), - CreateFineTuningJobRequestHyperparametersNEpochs(enum), - NEpochsVariant2(int), - Suffix(string), - ValidationFile(string), - Integrations(array), - CreateFineTuningJobRequestIntegration(class), - Type_AllOf1Wrapped(oneOf), - CreateFineTuningJobRequestIntegrationType(enum), - CreateFineTuningJobRequestIntegrationWandb(class), - Project(string), - Name(string), - Entity(string), - Tags(array), - TagsItem(string), - Seed(int), - ListFineTuningJobEventsResponse(class), - Data(array), - FineTuningJobEvent(ref), - ListFineTuningJobEventsResponseObject(enum), - ListFineTuningJobCheckpointsResponse(class), - Data(array), - FineTuningJobCheckpoint(ref), - ListFineTuningJobCheckpointsResponseObject(enum), - FirstId(string), - LastId(string), - HasMore(bool), - CreateEmbeddingRequest(class), - Input2(oneOf), - InputVariant1(string), - InputVariant2(array), - InputVariant2Item(string), - InputVariant3(array), - InputVariant3Item(int), - InputVariant4(array), - InputVariant4Item(array), - InputVariant4ItemItem(int), - Model8(anyOf), + CreateRunRequestModel(enum), + Instructions(string), + AdditionalInstructions(string), + AdditionalMessages(array), + CreateMessageRequest(ref), + Tools(array), + ToolsItem4(oneOf), + AssistantToolsCode(ref), + AssistantToolsFileSearch(ref), + AssistantToolsFunction(ref), + CreateRunRequestToolDiscriminator(class), + CreateRunRequestToolDiscriminatorType(enum), + CreateRunRequestMetadata(class), + Temperature(double), + TopP(double), + Stream(bool), + MaxPromptTokens(int), + MaxCompletionTokens(int), + TruncationObject(ref), + AssistantsApiToolChoiceOption(ref), + ParallelToolCalls(ref), + AssistantsApiResponseFormatOption(ref), + CreateSpeechRequest(class), + Model11(anyOf), ModelVariant1(string), - CreateEmbeddingRequestModel(enum), - CreateEmbeddingRequestEncodingFormat(enum), - Dimensions(int), - User(string), - CreateEmbeddingResponse(class), - Data(array), - Embedding(ref), - Model(string), - CreateEmbeddingResponseObject(enum), - CreateEmbeddingResponseUsage(class), - PromptTokens(int), - TotalTokens(int), + CreateSpeechRequestModel(enum), + Input(string), + CreateSpeechRequestVoice(enum), + CreateSpeechRequestResponseFormat(enum), + Speed(double), + CreateThreadAndRunRequest(class), + AssistantId(string), + CreateThreadRequest(ref), + Model12(anyOf), + ModelVariant1(string), + 
CreateThreadAndRunRequestModel(enum), + Instructions(string), + Tools(array), + ToolsItem5(oneOf), + AssistantToolsCode(ref), + AssistantToolsFileSearch(ref), + AssistantToolsFunction(ref), + CreateThreadAndRunRequestToolDiscriminator(class), + CreateThreadAndRunRequestToolDiscriminatorType(enum), + CreateThreadAndRunRequestToolResources(class), + CreateThreadAndRunRequestToolResourcesCodeInterpreter(class), + FileIds(array), + FileIdsItem(string), + CreateThreadAndRunRequestToolResourcesFileSearch(class), + VectorStoreIds(array), + VectorStoreIdsItem(string), + CreateThreadAndRunRequestMetadata(class), + Temperature(double), + TopP(double), + Stream(bool), + MaxPromptTokens(int), + MaxCompletionTokens(int), + TruncationObject(ref), + AssistantsApiToolChoiceOption(ref), + ParallelToolCalls(ref), + AssistantsApiResponseFormatOption(ref), + CreateThreadRequest(class), + Messages(array), + CreateMessageRequest(ref), + CreateThreadRequestToolResources(class), + CreateThreadRequestToolResourcesCodeInterpreter(class), + FileIds(array), + FileIdsItem(string), + CreateThreadRequestToolResourcesFileSearch(class), + VectorStoreIds(array), + VectorStoreIdsItem(string), + VectorStores(array), + CreateThreadRequestToolResourcesFileSearchVectorStore(class), + FileIds(array), + FileIdsItem(string), + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy(class), + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1(class), + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type(enum), + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2(class), + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type(enum), + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static(class), + MaxChunkSizeTokens(int), + ChunkOverlapTokens(int), + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator(class), + CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType(enum), + CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata(class), + CreateThreadRequestToolResourcesFileSearchVariant1(class), + CreateThreadRequestToolResourcesFileSearchVariant2(class), + CreateThreadRequestMetadata(class), CreateTranscriptionRequest(class), File(byte[]), - Model9(anyOf), + Model13(anyOf), ModelVariant1(string), CreateTranscriptionRequestModel(enum), Language(string), Prompt(string), - CreateTranscriptionRequestResponseFormat(enum), - Temperature(double), - TimestampGranularities(array), - CreateTranscriptionRequestTimestampGranularitie(enum), - CreateTranscriptionResponseJson(class), - Text(string), - TranscriptionSegment(class), - Id(int), - Seek(int), - Start(float), - End(float), + AudioResponseFormat(ref), + Temperature(double), + TimestampGranularities(array), + CreateTranscriptionRequestTimestampGranularitie(enum), + CreateTranscriptionResponseJson(class), Text(string), - Tokens(array), - TokensItem(int), - Temperature(float), - AvgLogprob(float), - CompressionRatio(float), - NoSpeechProb(float), - TranscriptionWord(class), - Word(string), - Start(float), - End(float), CreateTranscriptionResponseVerboseJson(class), Language(string), Duration(string), @@ -523,11 +956,11 @@ TranscriptionSegment(ref), CreateTranslationRequest(class), File(byte[]), - Model10(anyOf), + Model14(anyOf), ModelVariant1(string), CreateTranslationRequestModel(enum), Prompt(string), - ResponseFormat(string), + AudioResponseFormat(ref), 
Temperature(double), CreateTranslationResponseJson(class), Text(string), @@ -537,48 +970,93 @@ Text(string), Segments(array), TranscriptionSegment(ref), - CreateSpeechRequest(class), - Model11(anyOf), - ModelVariant1(string), - CreateSpeechRequestModel(enum), - Input(string), - CreateSpeechRequestVoice(enum), - CreateSpeechRequestResponseFormat(enum), - Speed(double), - Model12(class), + CreateUploadRequest(class), + Filename(string), + CreateUploadRequestPurpose(enum), + Bytes(int), + MimeType(string), + CreateVectorStoreFileBatchRequest(class), + FileIds(array), + FileIdsItem(string), + ChunkingStrategyRequestParam(ref), + CreateVectorStoreFileRequest(class), + FileId(string), + ChunkingStrategyRequestParam(ref), + CreateVectorStoreRequest(class), + FileIds(array), + FileIdsItem(string), + Name(string), + VectorStoreExpirationAfter(ref), + CreateVectorStoreRequestChunkingStrategy(class), + AutoChunkingStrategyRequestParam(ref), + StaticChunkingStrategyRequestParam(ref), + CreateVectorStoreRequestChunkingStrategyDiscriminator(class), + CreateVectorStoreRequestChunkingStrategyDiscriminatorType(enum), + CreateVectorStoreRequestMetadata(class), + DefaultProjectErrorResponse(class), + Code(int), + Message(string), + DeleteAssistantResponse(class), Id(string), - Created(int), - ModelObject(enum), - OwnedBy(string), - OpenAIFile(class), + Deleted(bool), + DeleteAssistantResponseObject(enum), + DeleteFileResponse(class), Id(string), - Bytes(int), - CreatedAt(int), - Filename(string), - OpenAIFileObject(enum), - OpenAIFilePurpose(enum), - OpenAIFileStatus(enum), - StatusDetails(string), - Upload(class), + DeleteFileResponseObject(enum), + Deleted(bool), + DeleteMessageResponse(class), Id(string), - CreatedAt(int), - Filename(string), - Bytes(int), - Purpose(string), - UploadStatus(enum), - ExpiresAt(int), - UploadObject(enum), - OpenAIFile(ref), - UploadPart(class), + Deleted(bool), + DeleteMessageResponseObject(enum), + DeleteModelResponse(class), Id(string), - CreatedAt(int), - UploadId(string), - UploadPartObject(enum), + Deleted(bool), + Object(string), + DeleteThreadResponse(class), + Id(string), + Deleted(bool), + DeleteThreadResponseObject(enum), + DeleteVectorStoreFileResponse(class), + Id(string), + Deleted(bool), + DeleteVectorStoreFileResponseObject(enum), + DeleteVectorStoreResponse(class), + Id(string), + Deleted(bool), + DeleteVectorStoreResponseObject(enum), + DoneEvent(class), + DoneEventEvent(enum), + DoneEventData(enum), Embedding(class), Index(int), Embedding1(array), Embedding1Item(double), EmbeddingObject(enum), + Error(class), + Code(string), + Message(string), + Param(string), + Type(string), + ErrorEvent(class), + ErrorEventEvent(enum), + Error(ref), + ErrorResponse(class), + Error(ref), + FileSearchRankingOptions(class), + FileSearchRankingOptionsRanker(enum), + ScoreThreshold(double), + FineTuneChatCompletionRequestAssistantMessage(allOf), + FineTuneChatCompletionRequestAssistantMessageVariant1(class), + Weight(int), + ChatCompletionRequestAssistantMessage(ref), + FineTuningIntegration(class), + FineTuningIntegrationType(enum), + FineTuningIntegrationWandb(class), + Project(string), + Name(string), + Entity(string), + Tags(array), + TagsItem(string), FineTuningJob(class), Id(string), CreatedAt(int), @@ -606,20 +1084,6 @@ FineTuningIntegration(ref), Seed(int), EstimatedFinish(int), - FineTuningIntegration(class), - FineTuningIntegrationType(enum), - FineTuningIntegrationWandb(class), - Project(string), - Name(string), - Entity(string), - Tags(array), - TagsItem(string), - 
FineTuningJobEvent(class), - Id(string), - CreatedAt(int), - FineTuningJobEventLevel(enum), - Message(string), - FineTuningJobEventObject(enum), FineTuningJobCheckpoint(class), Id(string), CreatedAt(int), @@ -635,6 +1099,12 @@ FullValidMeanTokenAccuracy(double), FineTuningJobId(string), FineTuningJobCheckpointObject(enum), + FineTuningJobEvent(class), + Id(string), + CreatedAt(int), + FineTuningJobEventLevel(enum), + Message(string), + FineTuningJobEventObject(enum), FinetuneChatRequestInput(class), Messages(array), MessagesItem(oneOf), @@ -651,119 +1121,43 @@ FinetuneCompletionRequestInput(class), Prompt(string), Completion(string), - CompletionUsage(class), - CompletionTokens(int), - PromptTokens(int), - TotalTokens(int), - RunCompletionUsage(class), - CompletionTokens(int), - PromptTokens(int), - TotalTokens(int), - RunStepCompletionUsage(class), - CompletionTokens(int), - PromptTokens(int), - TotalTokens(int), - AssistantsApiResponseFormatOption(oneOf), - AssistantsApiResponseFormatOptionEnum(enum), - ResponseFormatText(ref), - ResponseFormatJsonObject(ref), - ResponseFormatJsonSchema(ref), - AssistantObject(class), - Id(string), - AssistantObjectObject(enum), - CreatedAt(int), - Name(string), - Description(string), - Model(string), - Instructions(string), - Tools(array), - ToolsItem(oneOf), - AssistantToolsCode(ref), - AssistantToolsFileSearch(ref), - AssistantToolsFunction(ref), - AssistantObjectToolDiscriminator(class), - AssistantObjectToolDiscriminatorType(enum), - AssistantObjectToolResources(class), - AssistantObjectToolResourcesCodeInterpreter(class), - FileIds(array), - FileIdsItem(string), - AssistantObjectToolResourcesFileSearch(class), - VectorStoreIds(array), - VectorStoreIdsItem(string), - AssistantObjectMetadata(class), - Temperature(double), - TopP(double), - AssistantsApiResponseFormatOption(ref), - CreateAssistantRequest(class), - Model13(anyOf), - ModelVariant1(string), - CreateAssistantRequestModel(enum), - Name(string), + FunctionObject(class), Description(string), - Instructions(string), - Tools(array), - ToolsItem2(oneOf), - AssistantToolsCode(ref), - AssistantToolsFileSearch(ref), - AssistantToolsFunction(ref), - CreateAssistantRequestToolDiscriminator(class), - CreateAssistantRequestToolDiscriminatorType(enum), - CreateAssistantRequestToolResources(class), - CreateAssistantRequestToolResourcesCodeInterpreter(class), - FileIds(array), - FileIdsItem(string), - CreateAssistantRequestToolResourcesFileSearch(class), - VectorStoreIds(array), - VectorStoreIdsItem(string), - VectorStores(array), - CreateAssistantRequestToolResourcesFileSearchVectorStore(class), - FileIds(array), - FileIdsItem(string), - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategy(class), - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1(class), - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type(enum), - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2(class), - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type(enum), - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static(class), - MaxChunkSizeTokens(int), - ChunkOverlapTokens(int), - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator(class), - CreateAssistantRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType(enum), - CreateAssistantRequestToolResourcesFileSearchVectorStoreMetadata(class), - 
CreateAssistantRequestToolResourcesFileSearchVariant1(class), - CreateAssistantRequestToolResourcesFileSearchVariant2(class), - CreateAssistantRequestMetadata(class), - Temperature(double), - TopP(double), - AssistantsApiResponseFormatOption(ref), - ModifyAssistantRequest(class), - Model_AllOf1Wrapped(anyOf), - ModelVariant1(string), Name(string), - Description(string), - Instructions(string), - Tools(array), - ToolsItem3(oneOf), - AssistantToolsCode(ref), - AssistantToolsFileSearch(ref), - AssistantToolsFunction(ref), - ModifyAssistantRequestToolDiscriminator(class), - ModifyAssistantRequestToolDiscriminatorType(enum), - ModifyAssistantRequestToolResources(class), - ModifyAssistantRequestToolResourcesCodeInterpreter(class), - FileIds(array), - FileIdsItem(string), - ModifyAssistantRequestToolResourcesFileSearch(class), - VectorStoreIds(array), - VectorStoreIdsItem(string), - ModifyAssistantRequestMetadata(class), - Temperature(double), - TopP(double), - AssistantsApiResponseFormatOption(ref), - DeleteAssistantResponse(class), + FunctionParameters(ref), + Strict(bool), + FunctionParameters(class), + Image(class), + B64Json(string), + Url(string), + RevisedPrompt(string), + ImagesResponse(class), + Created(int), + Data(array), + Image(ref), + Invite(class), + InviteObject(enum), + Id(string), + Email(string), + InviteRole(enum), + InviteStatus(enum), + InvitedAt(int), + ExpiresAt(int), + AcceptedAt(int), + InviteDeleteResponse(class), + InviteDeleteResponseObject(enum), Id(string), Deleted(bool), - DeleteAssistantResponseObject(enum), + InviteListResponse(class), + InviteListResponseObject(enum), + Data(array), + Invite(ref), + FirstId(string), + LastId(string), + HasMore(bool), + InviteRequest(class), + Email(string), + InviteRequestRole(enum), ListAssistantsResponse(class), Object(string), Data(array), @@ -771,199 +1165,68 @@ FirstId(string), LastId(string), HasMore(bool), - AssistantToolsCode(class), - AssistantToolsCodeType(enum), - AssistantToolsFileSearch(class), - AssistantToolsFileSearchType(enum), - AssistantToolsFileSearchFileSearch(class), - MaxNumResults(int), - AssistantToolsFileSearchTypeOnly(class), - AssistantToolsFileSearchTypeOnlyType(enum), - AssistantToolsFunction(class), - AssistantToolsFunctionType(enum), - FunctionObject(ref), - TruncationObject(class), - TruncationObjectType(enum), - LastMessages(int), - AssistantsApiToolChoiceOption(oneOf), - AssistantsApiToolChoiceOptionEnum(enum), - AssistantsNamedToolChoice(ref), - AssistantsNamedToolChoice(class), - AssistantsNamedToolChoiceType(enum), - AssistantsNamedToolChoiceFunction(class), - Name(string), - RunObject(class), - Id(string), - RunObjectObject(enum), - CreatedAt(int), - ThreadId(string), - AssistantId(string), - RunObjectStatus(enum), - RunObjectRequiredAction(class), - RunObjectRequiredActionType(enum), - RunObjectRequiredActionSubmitToolOutputs(class), - ToolCalls(array), - RunToolCallObject(ref), - RunObjectLastError(class), - RunObjectLastErrorCode(enum), - Message(string), - ExpiresAt(int), - StartedAt(int), - CancelledAt(int), - FailedAt(int), - CompletedAt(int), - RunObjectIncompleteDetails(class), - RunObjectIncompleteDetailsReason(enum), - Model(string), - Instructions(string), - Tools(array), - ToolsItem4(oneOf), - AssistantToolsCode(ref), - AssistantToolsFileSearch(ref), - AssistantToolsFunction(ref), - RunObjectToolDiscriminator(class), - RunObjectToolDiscriminatorType(enum), - RunObjectMetadata(class), - RunCompletionUsage(ref), - Temperature(double), - TopP(double), - MaxPromptTokens(int), - 
MaxCompletionTokens(int), - TruncationObject(ref), - AssistantsApiToolChoiceOption(ref), - ParallelToolCalls(ref), - AssistantsApiResponseFormatOption(ref), - CreateRunRequest(class), - AssistantId(string), - Model14(anyOf), - ModelVariant1(string), - CreateRunRequestModel(enum), - Instructions(string), - AdditionalInstructions(string), - AdditionalMessages(array), - CreateMessageRequest(ref), - Tools(array), - ToolsItem5(oneOf), - AssistantToolsCode(ref), - AssistantToolsFileSearch(ref), - AssistantToolsFunction(ref), - CreateRunRequestToolDiscriminator(class), - CreateRunRequestToolDiscriminatorType(enum), - CreateRunRequestMetadata(class), - Temperature(double), - TopP(double), - Stream(bool), - MaxPromptTokens(int), - MaxCompletionTokens(int), - TruncationObject(ref), - AssistantsApiToolChoiceOption(ref), - ParallelToolCalls(ref), - AssistantsApiResponseFormatOption(ref), - ListRunsResponse(class), + ListAuditLogsResponse(class), + ListAuditLogsResponseObject(enum), + Data(array), + AuditLog(ref), + FirstId(string), + LastId(string), + HasMore(bool), + ListBatchesResponse(class), + Data(array), + Batch(ref), + FirstId(string), + LastId(string), + HasMore(bool), + ListBatchesResponseObject(enum), + ListFilesResponse(class), Object(string), Data(array), - RunObject(ref), + OpenAIFile(ref), + FirstId(string), + LastId(string), + HasMore(bool), + ListFineTuningJobCheckpointsResponse(class), + Data(array), + FineTuningJobCheckpoint(ref), + ListFineTuningJobCheckpointsResponseObject(enum), + FirstId(string), + LastId(string), + HasMore(bool), + ListFineTuningJobEventsResponse(class), + Data(array), + FineTuningJobEvent(ref), + ListFineTuningJobEventsResponseObject(enum), + ListMessagesResponse(class), + Object(string), + Data(array), + MessageObject(ref), + FirstId(string), + LastId(string), + HasMore(bool), + ListModelsResponse(class), + ListModelsResponseObject(enum), + Data(array), + Model15(ref), + ListPaginatedFineTuningJobsResponse(class), + Data(array), + FineTuningJob(ref), + HasMore(bool), + ListPaginatedFineTuningJobsResponseObject(enum), + ListRunStepsResponse(class), + Object(string), + Data(array), + RunStepObject(ref), + FirstId(string), + LastId(string), + HasMore(bool), + ListRunsResponse(class), + Object(string), + Data(array), + RunObject(ref), FirstId(string), LastId(string), HasMore(bool), - ModifyRunRequest(class), - ModifyRunRequestMetadata(class), - SubmitToolOutputsRunRequest(class), - ToolOutputs(array), - SubmitToolOutputsRunRequestToolOutput(class), - ToolCallId(string), - Output(string), - Stream(bool), - RunToolCallObject(class), - Id(string), - RunToolCallObjectType(enum), - RunToolCallObjectFunction(class), - Name(string), - Arguments(string), - CreateThreadAndRunRequest(class), - AssistantId(string), - CreateThreadRequest(ref), - Model15(anyOf), - ModelVariant1(string), - CreateThreadAndRunRequestModel(enum), - Instructions(string), - Tools(array), - ToolsItem6(oneOf), - AssistantToolsCode(ref), - AssistantToolsFileSearch(ref), - AssistantToolsFunction(ref), - CreateThreadAndRunRequestToolDiscriminator(class), - CreateThreadAndRunRequestToolDiscriminatorType(enum), - CreateThreadAndRunRequestToolResources(class), - CreateThreadAndRunRequestToolResourcesCodeInterpreter(class), - FileIds(array), - FileIdsItem(string), - CreateThreadAndRunRequestToolResourcesFileSearch(class), - VectorStoreIds(array), - VectorStoreIdsItem(string), - CreateThreadAndRunRequestMetadata(class), - Temperature(double), - TopP(double), - Stream(bool), - MaxPromptTokens(int), - 
MaxCompletionTokens(int), - TruncationObject(ref), - AssistantsApiToolChoiceOption(ref), - ParallelToolCalls(ref), - AssistantsApiResponseFormatOption(ref), - ThreadObject(class), - Id(string), - ThreadObjectObject(enum), - CreatedAt(int), - ThreadObjectToolResources(class), - ThreadObjectToolResourcesCodeInterpreter(class), - FileIds(array), - FileIdsItem(string), - ThreadObjectToolResourcesFileSearch(class), - VectorStoreIds(array), - VectorStoreIdsItem(string), - ThreadObjectMetadata(class), - CreateThreadRequest(class), - Messages(array), - CreateMessageRequest(ref), - CreateThreadRequestToolResources(class), - CreateThreadRequestToolResourcesCodeInterpreter(class), - FileIds(array), - FileIdsItem(string), - CreateThreadRequestToolResourcesFileSearch(class), - VectorStoreIds(array), - VectorStoreIdsItem(string), - VectorStores(array), - CreateThreadRequestToolResourcesFileSearchVectorStore(class), - FileIds(array), - FileIdsItem(string), - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategy(class), - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1(class), - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant1Type(enum), - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2(class), - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Type(enum), - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyVariant2Static(class), - MaxChunkSizeTokens(int), - ChunkOverlapTokens(int), - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminator(class), - CreateThreadRequestToolResourcesFileSearchVectorStoreChunkingStrategyDiscriminatorType(enum), - CreateThreadRequestToolResourcesFileSearchVectorStoreMetadata(class), - CreateThreadRequestToolResourcesFileSearchVariant1(class), - CreateThreadRequestToolResourcesFileSearchVariant2(class), - CreateThreadRequestMetadata(class), - ModifyThreadRequest(class), - ModifyThreadRequestToolResources(class), - ModifyThreadRequestToolResourcesCodeInterpreter(class), - FileIds(array), - FileIdsItem(string), - ModifyThreadRequestToolResourcesFileSearch(class), - VectorStoreIds(array), - VectorStoreIdsItem(string), - ModifyThreadRequestMetadata(class), - DeleteThreadResponse(class), - Id(string), - Deleted(bool), - DeleteThreadResponseObject(enum), ListThreadsResponse(class), Object(string), Data(array), @@ -971,81 +1234,17 @@ FirstId(string), LastId(string), HasMore(bool), - MessageObject(class), - Id(string), - MessageObjectObject(enum), - CreatedAt(int), - ThreadId(string), - MessageObjectStatus(enum), - MessageObjectIncompleteDetails(class), - MessageObjectIncompleteDetailsReason(enum), - CompletedAt(int), - IncompleteAt(int), - MessageObjectRole(enum), - Content(array), - ContentItem(oneOf), - MessageContentImageFileObject(ref), - MessageContentImageUrlObject(ref), - MessageContentTextObject(ref), - MessageContentRefusalObject(ref), - MessageObjectContentItemDiscriminator(class), - MessageObjectContentItemDiscriminatorType(enum), - AssistantId(string), - RunId(string), - Attachments(array), - MessageObjectAttachment(class), - FileId(string), - Tools(array), - ToolsItem7(oneOf), - AssistantToolsCode(ref), - AssistantToolsFileSearchTypeOnly(ref), - MessageObjectAttachmentToolDiscriminator(class), - MessageObjectAttachmentToolDiscriminatorType(enum), - MessageObjectMetadata(class), - MessageDeltaObject(class), - Id(string), - MessageDeltaObjectObject(enum), - MessageDeltaObjectDelta(class), - 
MessageDeltaObjectDeltaRole(enum), - Content(array), - ContentItem2(oneOf), - MessageDeltaContentImageFileObject(ref), - MessageDeltaContentTextObject(ref), - MessageDeltaContentRefusalObject(ref), - MessageDeltaContentImageUrlObject(ref), - MessageDeltaObjectDeltaContentItemDiscriminator(class), - MessageDeltaObjectDeltaContentItemDiscriminatorType(enum), - CreateMessageRequest(class), - CreateMessageRequestRole(enum), - Content5(oneOf), - ContentVariant1(string), - ContentVariant2(array), - ContentVariant2Item(oneOf), - MessageContentImageFileObject(ref), - MessageContentImageUrlObject(ref), - MessageRequestContentTextObject(ref), - CreateMessageRequestContentVariant2ItemDiscriminator(class), - CreateMessageRequestContentVariant2ItemDiscriminatorType(enum), - Attachments(array), - CreateMessageRequestAttachment(class), - FileId(string), - Tools(array), - ToolsItem8(oneOf), - AssistantToolsCode(ref), - AssistantToolsFileSearchTypeOnly(ref), - CreateMessageRequestAttachmentToolDiscriminator(class), - CreateMessageRequestAttachmentToolDiscriminatorType(enum), - CreateMessageRequestMetadata(class), - ModifyMessageRequest(class), - ModifyMessageRequestMetadata(class), - DeleteMessageResponse(class), - Id(string), - Deleted(bool), - DeleteMessageResponseObject(enum), - ListMessagesResponse(class), + ListVectorStoreFilesResponse(class), Object(string), Data(array), - MessageObject(ref), + VectorStoreFileObject(ref), + FirstId(string), + LastId(string), + HasMore(bool), + ListVectorStoresResponse(class), + Object(string), + Data(array), + VectorStoreObject(ref), FirstId(string), LastId(string), HasMore(bool), @@ -1054,39 +1253,14 @@ MessageContentImageFileObjectImageFile(class), FileId(string), MessageContentImageFileObjectImageFileDetail(enum), - MessageDeltaContentImageFileObject(class), - Index(int), - MessageDeltaContentImageFileObjectType(enum), - MessageDeltaContentImageFileObjectImageFile(class), - FileId(string), - MessageDeltaContentImageFileObjectImageFileDetail(enum), MessageContentImageUrlObject(class), MessageContentImageUrlObjectType(enum), MessageContentImageUrlObjectImageUrl(class), Url(Uri), MessageContentImageUrlObjectImageUrlDetail(enum), - MessageDeltaContentImageUrlObject(class), - Index(int), - MessageDeltaContentImageUrlObjectType(enum), - MessageDeltaContentImageUrlObjectImageUrl(class), - Url(string), - MessageDeltaContentImageUrlObjectImageUrlDetail(enum), - MessageContentTextObject(class), - MessageContentTextObjectType(enum), - MessageContentTextObjectText(class), - Value(string), - Annotations(array), - AnnotationsItem(oneOf), - MessageContentTextAnnotationsFileCitationObject(ref), - MessageContentTextAnnotationsFilePathObject(ref), - MessageContentTextObjectTextAnnotationDiscriminator(class), - MessageContentTextObjectTextAnnotationDiscriminatorType(enum), MessageContentRefusalObject(class), MessageContentRefusalObjectType(enum), Refusal(string), - MessageRequestContentTextObject(class), - MessageRequestContentTextObjectType(enum), - Text(string), MessageContentTextAnnotationsFileCitationObject(class), MessageContentTextAnnotationsFileCitationObjectType(enum), Text(string), @@ -1101,17 +1275,28 @@ FileId(string), StartIndex(int), EndIndex(int), - MessageDeltaContentTextObject(class), - Index(int), - MessageDeltaContentTextObjectType(enum), - MessageDeltaContentTextObjectText(class), + MessageContentTextObject(class), + MessageContentTextObjectType(enum), + MessageContentTextObjectText(class), Value(string), Annotations(array), - AnnotationsItem2(oneOf), - 
MessageDeltaContentTextAnnotationsFileCitationObject(ref), - MessageDeltaContentTextAnnotationsFilePathObject(ref), - MessageDeltaContentTextObjectTextAnnotationDiscriminator(class), - MessageDeltaContentTextObjectTextAnnotationDiscriminatorType(enum), + AnnotationsItem(oneOf), + MessageContentTextAnnotationsFileCitationObject(ref), + MessageContentTextAnnotationsFilePathObject(ref), + MessageContentTextObjectTextAnnotationDiscriminator(class), + MessageContentTextObjectTextAnnotationDiscriminatorType(enum), + MessageDeltaContentImageFileObject(class), + Index(int), + MessageDeltaContentImageFileObjectType(enum), + MessageDeltaContentImageFileObjectImageFile(class), + FileId(string), + MessageDeltaContentImageFileObjectImageFileDetail(enum), + MessageDeltaContentImageUrlObject(class), + Index(int), + MessageDeltaContentImageUrlObjectType(enum), + MessageDeltaContentImageUrlObjectImageUrl(class), + Url(string), + MessageDeltaContentImageUrlObjectImageUrlDetail(enum), MessageDeltaContentRefusalObject(class), Index(int), MessageDeltaContentRefusalObjectType(enum), @@ -1133,498 +1318,881 @@ FileId(string), StartIndex(int), EndIndex(int), - RunStepObject(class), + MessageDeltaContentTextObject(class), + Index(int), + MessageDeltaContentTextObjectType(enum), + MessageDeltaContentTextObjectText(class), + Value(string), + Annotations(array), + AnnotationsItem2(oneOf), + MessageDeltaContentTextAnnotationsFileCitationObject(ref), + MessageDeltaContentTextAnnotationsFilePathObject(ref), + MessageDeltaContentTextObjectTextAnnotationDiscriminator(class), + MessageDeltaContentTextObjectTextAnnotationDiscriminatorType(enum), + MessageDeltaObject(class), + Id(string), + MessageDeltaObjectObject(enum), + MessageDeltaObjectDelta(class), + MessageDeltaObjectDeltaRole(enum), + Content(array), + ContentItem(oneOf), + MessageDeltaContentImageFileObject(ref), + MessageDeltaContentTextObject(ref), + MessageDeltaContentRefusalObject(ref), + MessageDeltaContentImageUrlObject(ref), + MessageDeltaObjectDeltaContentItemDiscriminator(class), + MessageDeltaObjectDeltaContentItemDiscriminatorType(enum), + MessageObject(class), + Id(string), + MessageObjectObject(enum), + CreatedAt(int), + ThreadId(string), + MessageObjectStatus(enum), + MessageObjectIncompleteDetails(class), + MessageObjectIncompleteDetailsReason(enum), + CompletedAt(int), + IncompleteAt(int), + MessageObjectRole(enum), + Content(array), + ContentItem2(oneOf), + MessageContentImageFileObject(ref), + MessageContentImageUrlObject(ref), + MessageContentTextObject(ref), + MessageContentRefusalObject(ref), + MessageObjectContentItemDiscriminator(class), + MessageObjectContentItemDiscriminatorType(enum), + AssistantId(string), + RunId(string), + Attachments(array), + MessageObjectAttachment(class), + FileId(string), + Tools(array), + ToolsItem6(oneOf), + AssistantToolsCode(ref), + AssistantToolsFileSearchTypeOnly(ref), + MessageObjectAttachmentToolDiscriminator(class), + MessageObjectAttachmentToolDiscriminatorType(enum), + MessageObjectMetadata(class), + MessageRequestContentTextObject(class), + MessageRequestContentTextObjectType(enum), + Text(string), + Model15(class), + Id(string), + Created(int), + ModelObject(enum), + OwnedBy(string), + ModifyAssistantRequest(class), + Model_AllOf1Wrapped(anyOf), + ModelVariant1(string), + Name(string), + Description(string), + Instructions(string), + Tools(array), + ToolsItem7(oneOf), + AssistantToolsCode(ref), + AssistantToolsFileSearch(ref), + AssistantToolsFunction(ref), + 
ModifyAssistantRequestToolDiscriminator(class), + ModifyAssistantRequestToolDiscriminatorType(enum), + ModifyAssistantRequestToolResources(class), + ModifyAssistantRequestToolResourcesCodeInterpreter(class), + FileIds(array), + FileIdsItem(string), + ModifyAssistantRequestToolResourcesFileSearch(class), + VectorStoreIds(array), + VectorStoreIdsItem(string), + ModifyAssistantRequestMetadata(class), + Temperature(double), + TopP(double), + AssistantsApiResponseFormatOption(ref), + ModifyMessageRequest(class), + ModifyMessageRequestMetadata(class), + ModifyRunRequest(class), + ModifyRunRequestMetadata(class), + ModifyThreadRequest(class), + ModifyThreadRequestToolResources(class), + ModifyThreadRequestToolResourcesCodeInterpreter(class), + FileIds(array), + FileIdsItem(string), + ModifyThreadRequestToolResourcesFileSearch(class), + VectorStoreIds(array), + VectorStoreIdsItem(string), + ModifyThreadRequestMetadata(class), + OpenAIFile(class), + Id(string), + Bytes(int), + CreatedAt(int), + Filename(string), + OpenAIFileObject(enum), + OpenAIFilePurpose(enum), + OpenAIFileStatus(enum), + StatusDetails(string), + OtherChunkingStrategyResponseParam(class), + OtherChunkingStrategyResponseParamType(enum), + ParallelToolCalls(bool), + PredictionContent(class), + PredictionContentType(enum), + Content6(oneOf), + ContentVariant1(string), + ContentVariant2(array), + ChatCompletionRequestMessageContentPartText(ref), + Project(class), Id(string), - RunStepObjectObject(enum), + ProjectObject(enum), + Name(string), + CreatedAt(int), + ArchivedAt(int), + ProjectStatus(enum), + ProjectApiKey(class), + ProjectApiKeyObject(enum), + RedactedValue(string), + Name(string), CreatedAt(int), - AssistantId(string), - ThreadId(string), - RunId(string), - RunStepObjectType(enum), - RunStepObjectStatus(enum), - RunStepObjectStepDetails(class), - RunStepDetailsMessageCreationObject(ref), - RunStepDetailsToolCallsObject(ref), - RunStepObjectStepDetailsDiscriminator(class), - RunStepObjectStepDetailsDiscriminatorType(enum), - RunStepObjectLastError(class), - RunStepObjectLastErrorCode(enum), - Message(string), - ExpiredAt(int), - CancelledAt(int), - FailedAt(int), - CompletedAt(int), - RunStepObjectMetadata(class), - RunStepCompletionUsage(ref), - RunStepDeltaObject(class), Id(string), - RunStepDeltaObjectObject(enum), - RunStepDeltaObjectDelta(class), - RunStepDeltaObjectDeltaStepDetails(class), - RunStepDeltaStepDetailsMessageCreationObject(ref), - RunStepDeltaStepDetailsToolCallsObject(ref), - RunStepDeltaObjectDeltaStepDetailsDiscriminator(class), - RunStepDeltaObjectDeltaStepDetailsDiscriminatorType(enum), - ListRunStepsResponse(class), - Object(string), + ProjectApiKeyOwner(class), + ProjectApiKeyOwnerType(enum), + ProjectUser(ref), + ProjectServiceAccount(ref), + ProjectApiKeyDeleteResponse(class), + ProjectApiKeyDeleteResponseObject(enum), + Id(string), + Deleted(bool), + ProjectApiKeyListResponse(class), + ProjectApiKeyListResponseObject(enum), Data(array), - RunStepObject(ref), + ProjectApiKey(ref), FirstId(string), LastId(string), HasMore(bool), - RunStepDetailsMessageCreationObject(class), - RunStepDetailsMessageCreationObjectType(enum), - RunStepDetailsMessageCreationObjectMessageCreation(class), - MessageId(string), - RunStepDeltaStepDetailsMessageCreationObject(class), - RunStepDeltaStepDetailsMessageCreationObjectType(enum), - RunStepDeltaStepDetailsMessageCreationObjectMessageCreation(class), - MessageId(string), - RunStepDetailsToolCallsObject(class), - RunStepDetailsToolCallsObjectType(enum), - 
ToolCalls(array), - ToolCallsItem(oneOf), - RunStepDetailsToolCallsCodeObject(ref), - RunStepDetailsToolCallsFileSearchObject(ref), - RunStepDetailsToolCallsFunctionObject(ref), - RunStepDetailsToolCallsObjectToolCallDiscriminator(class), - RunStepDetailsToolCallsObjectToolCallDiscriminatorType(enum), - RunStepDeltaStepDetailsToolCallsObject(class), - RunStepDeltaStepDetailsToolCallsObjectType(enum), - ToolCalls(array), - ToolCallsItem2(oneOf), - RunStepDeltaStepDetailsToolCallsCodeObject(ref), - RunStepDeltaStepDetailsToolCallsFileSearchObject(ref), - RunStepDeltaStepDetailsToolCallsFunctionObject(ref), - RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator(class), - RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType(enum), - RunStepDetailsToolCallsCodeObject(class), - Id(string), - RunStepDetailsToolCallsCodeObjectType(enum), - RunStepDetailsToolCallsCodeObjectCodeInterpreter(class), - Input(string), - Outputs(array), - RunStepDetailsToolCallsCodeObjectCodeInterpreterOutput(class), - RunStepDetailsToolCallsCodeOutputLogsObject(ref), - RunStepDetailsToolCallsCodeOutputImageObject(ref), - RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator(class), - RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType(enum), - RunStepDeltaStepDetailsToolCallsCodeObject(class), - Index(int), - Id(string), - RunStepDeltaStepDetailsToolCallsCodeObjectType(enum), - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter(class), - Input(string), - Outputs(array), - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutput(class), - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject(ref), - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject(ref), - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator(class), - RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType(enum), - RunStepDetailsToolCallsCodeOutputLogsObject(class), - RunStepDetailsToolCallsCodeOutputLogsObjectType(enum), - Logs(string), - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject(class), - Index(int), - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectType(enum), - Logs(string), - RunStepDetailsToolCallsCodeOutputImageObject(class), - RunStepDetailsToolCallsCodeOutputImageObjectType(enum), - RunStepDetailsToolCallsCodeOutputImageObjectImage(class), - FileId(string), - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject(class), - Index(int), - RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectType(enum), - RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImage(class), - FileId(string), - RunStepDetailsToolCallsFileSearchObject(class), - Id(string), - RunStepDetailsToolCallsFileSearchObjectType(enum), - RunStepDetailsToolCallsFileSearchObjectFileSearch(class), - RunStepDeltaStepDetailsToolCallsFileSearchObject(class), - Index(int), - Id(string), - RunStepDeltaStepDetailsToolCallsFileSearchObjectType(enum), - RunStepDeltaStepDetailsToolCallsFileSearchObjectFileSearch(class), - RunStepDetailsToolCallsFunctionObject(class), - Id(string), - RunStepDetailsToolCallsFunctionObjectType(enum), - RunStepDetailsToolCallsFunctionObjectFunction(class), - Name(string), - Arguments(string), - Output(string), - RunStepDeltaStepDetailsToolCallsFunctionObject(class), - Index(int), + ProjectCreateRequest(class), + Name(string), + ProjectListResponse(class), + ProjectListResponseObject(enum), + Data(array), + Project(ref), + FirstId(string), + LastId(string), + HasMore(bool), + ProjectRateLimit(class), + ProjectRateLimitObject(enum), 
Id(string), - RunStepDeltaStepDetailsToolCallsFunctionObjectType(enum), - RunStepDeltaStepDetailsToolCallsFunctionObjectFunction(class), - Name(string), - Arguments(string), - Output(string), - VectorStoreExpirationAfter(class), - VectorStoreExpirationAfterAnchor(enum), - Days(int), - VectorStoreObject(class), + Model(string), + MaxRequestsPer1Minute(int), + MaxTokensPer1Minute(int), + MaxImagesPer1Minute(int), + MaxAudioMegabytesPer1Minute(int), + MaxRequestsPer1Day(int), + Batch1DayMaxInputTokens(int), + ProjectRateLimitListResponse(class), + ProjectRateLimitListResponseObject(enum), + Data(array), + ProjectRateLimit(ref), + FirstId(string), + LastId(string), + HasMore(bool), + ProjectRateLimitUpdateRequest(class), + MaxRequestsPer1Minute(int), + MaxTokensPer1Minute(int), + MaxImagesPer1Minute(int), + MaxAudioMegabytesPer1Minute(int), + MaxRequestsPer1Day(int), + Batch1DayMaxInputTokens(int), + ProjectServiceAccount(class), + ProjectServiceAccountObject(enum), Id(string), - VectorStoreObjectObject(enum), + Name(string), + ProjectServiceAccountRole(enum), CreatedAt(int), + ProjectServiceAccountApiKey(class), + ProjectServiceAccountApiKeyObject(enum), + Value(string), Name(string), - UsageBytes(int), - VectorStoreObjectFileCounts(class), - InProgress(int), - Completed(int), - Failed(int), - Cancelled(int), - Total(int), - VectorStoreObjectStatus(enum), - VectorStoreExpirationAfter(ref), - ExpiresAt(int), - LastActiveAt(int), - VectorStoreObjectMetadata(class), - CreateVectorStoreRequest(class), - FileIds(array), - FileIdsItem(string), + CreatedAt(int), + Id(string), + ProjectServiceAccountCreateRequest(class), Name(string), - VectorStoreExpirationAfter(ref), - CreateVectorStoreRequestChunkingStrategy(class), - AutoChunkingStrategyRequestParam(ref), - StaticChunkingStrategyRequestParam(ref), - CreateVectorStoreRequestChunkingStrategyDiscriminator(class), - CreateVectorStoreRequestChunkingStrategyDiscriminatorType(enum), - CreateVectorStoreRequestMetadata(class), - UpdateVectorStoreRequest(class), + ProjectServiceAccountCreateResponse(class), + ProjectServiceAccountCreateResponseObject(enum), + Id(string), Name(string), - VectorStoreExpirationAfter(ref), - UpdateVectorStoreRequestMetadata(class), - ListVectorStoresResponse(class), - Object(string), + ProjectServiceAccountCreateResponseRole(enum), + CreatedAt(int), + ProjectServiceAccountApiKey(ref), + ProjectServiceAccountDeleteResponse(class), + ProjectServiceAccountDeleteResponseObject(enum), + Id(string), + Deleted(bool), + ProjectServiceAccountListResponse(class), + ProjectServiceAccountListResponseObject(enum), Data(array), - VectorStoreObject(ref), + ProjectServiceAccount(ref), FirstId(string), LastId(string), HasMore(bool), - DeleteVectorStoreResponse(class), - Id(string), - Deleted(bool), - DeleteVectorStoreResponseObject(enum), - VectorStoreFileObject(class), + ProjectUpdateRequest(class), + Name(string), + ProjectUser(class), + ProjectUserObject(enum), Id(string), - VectorStoreFileObjectObject(enum), - UsageBytes(int), - CreatedAt(int), - VectorStoreId(string), - VectorStoreFileObjectStatus(enum), - VectorStoreFileObjectLastError(class), - VectorStoreFileObjectLastErrorCode(enum), - Message(string), - VectorStoreFileObjectChunkingStrategy(class), - StaticChunkingStrategyResponseParam(ref), - OtherChunkingStrategyResponseParam(ref), - VectorStoreFileObjectChunkingStrategyDiscriminator(class), - VectorStoreFileObjectChunkingStrategyDiscriminatorType(enum), - OtherChunkingStrategyResponseParam(class), - 
OtherChunkingStrategyResponseParamType(enum), - StaticChunkingStrategyResponseParam(class), - StaticChunkingStrategyResponseParamType(enum), - StaticChunkingStrategy(ref), - StaticChunkingStrategy(class), - MaxChunkSizeTokens(int), - ChunkOverlapTokens(int), - AutoChunkingStrategyRequestParam(class), - AutoChunkingStrategyRequestParamType(enum), - StaticChunkingStrategyRequestParam(class), - StaticChunkingStrategyRequestParamType(enum), - StaticChunkingStrategy(ref), - ChunkingStrategyRequestParam(class), - AutoChunkingStrategyRequestParam(ref), - StaticChunkingStrategyRequestParam(ref), - ChunkingStrategyRequestParamDiscriminator(class), - ChunkingStrategyRequestParamDiscriminatorType(enum), - CreateVectorStoreFileRequest(class), - FileId(string), - ChunkingStrategyRequestParam(ref), - ListVectorStoreFilesResponse(class), + Name(string), + Email(string), + ProjectUserRole(enum), + AddedAt(int), + ProjectUserCreateRequest(class), + UserId(string), + ProjectUserCreateRequestRole(enum), + ProjectUserDeleteResponse(class), + ProjectUserDeleteResponseObject(enum), + Id(string), + Deleted(bool), + ProjectUserListResponse(class), Object(string), Data(array), - VectorStoreFileObject(ref), + ProjectUser(ref), FirstId(string), LastId(string), HasMore(bool), - DeleteVectorStoreFileResponse(class), - Id(string), - Deleted(bool), - DeleteVectorStoreFileResponseObject(enum), - VectorStoreFileBatchObject(class), + ProjectUserUpdateRequest(class), + ProjectUserUpdateRequestRole(enum), + RealtimeClientEventConversationItemCreate(class), + EventId(string), + RealtimeClientEventConversationItemCreateType(enum), + PreviousItemId(string), + RealtimeConversationItem(ref), + RealtimeClientEventConversationItemDelete(class), + EventId(string), + RealtimeClientEventConversationItemDeleteType(enum), + ItemId(string), + RealtimeClientEventConversationItemTruncate(class), + EventId(string), + RealtimeClientEventConversationItemTruncateType(enum), + ItemId(string), + ContentIndex(int), + AudioEndMs(int), + RealtimeClientEventInputAudioBufferAppend(class), + EventId(string), + RealtimeClientEventInputAudioBufferAppendType(enum), + Audio(string), + RealtimeClientEventInputAudioBufferClear(class), + EventId(string), + RealtimeClientEventInputAudioBufferClearType(enum), + RealtimeClientEventInputAudioBufferCommit(class), + EventId(string), + RealtimeClientEventInputAudioBufferCommitType(enum), + RealtimeClientEventResponseCancel(class), + EventId(string), + RealtimeClientEventResponseCancelType(enum), + RealtimeClientEventResponseCreate(class), + EventId(string), + RealtimeClientEventResponseCreateType(enum), + RealtimeSession(ref), + RealtimeClientEventSessionUpdate(class), + EventId(string), + RealtimeClientEventSessionUpdateType(enum), + RealtimeSession(ref), + RealtimeConversationItem(class), Id(string), - VectorStoreFileBatchObjectObject(enum), - CreatedAt(int), - VectorStoreId(string), - VectorStoreFileBatchObjectStatus(enum), - VectorStoreFileBatchObjectFileCounts(class), - InProgress(int), - Completed(int), - Failed(int), - Cancelled(int), - Total(int), - CreateVectorStoreFileBatchRequest(class), - FileIds(array), - FileIdsItem(string), - ChunkingStrategyRequestParam(ref), - AssistantStreamEvent(oneOf), - ErrorEvent(ref), - DoneEvent(ref), - AssistantStreamEventVariant3(class), - AssistantStreamEventVariant3Event(enum), - ThreadObject(ref), - AssistantStreamEventVariant4(class), - AssistantStreamEventVariant4Event(enum), - RunObject(ref), - AssistantStreamEventVariant5(class), - 
AssistantStreamEventVariant5Event(enum), - RunObject(ref), - AssistantStreamEventVariant6(class), - AssistantStreamEventVariant6Event(enum), - RunObject(ref), - AssistantStreamEventVariant7(class), - AssistantStreamEventVariant7Event(enum), - RunObject(ref), - AssistantStreamEventVariant8(class), - AssistantStreamEventVariant8Event(enum), - RunObject(ref), - AssistantStreamEventVariant9(class), - AssistantStreamEventVariant9Event(enum), - RunObject(ref), - AssistantStreamEventVariant10(class), - AssistantStreamEventVariant10Event(enum), - RunObject(ref), - AssistantStreamEventVariant11(class), - AssistantStreamEventVariant11Event(enum), - RunObject(ref), - AssistantStreamEventVariant12(class), - AssistantStreamEventVariant12Event(enum), - RunObject(ref), - AssistantStreamEventVariant13(class), - AssistantStreamEventVariant13Event(enum), - RunObject(ref), - AssistantStreamEventVariant14(class), - AssistantStreamEventVariant14Event(enum), - RunStepObject(ref), - AssistantStreamEventVariant15(class), - AssistantStreamEventVariant15Event(enum), - RunStepObject(ref), - AssistantStreamEventVariant16(class), - AssistantStreamEventVariant16Event(enum), - RunStepDeltaObject(ref), - AssistantStreamEventVariant17(class), - AssistantStreamEventVariant17Event(enum), - RunStepObject(ref), - AssistantStreamEventVariant18(class), - AssistantStreamEventVariant18Event(enum), - RunStepObject(ref), - AssistantStreamEventVariant19(class), - AssistantStreamEventVariant19Event(enum), - RunStepObject(ref), - AssistantStreamEventVariant20(class), - AssistantStreamEventVariant20Event(enum), - RunStepObject(ref), - AssistantStreamEventVariant21(class), - AssistantStreamEventVariant21Event(enum), - MessageObject(ref), - AssistantStreamEventVariant22(class), - AssistantStreamEventVariant22Event(enum), - MessageObject(ref), - AssistantStreamEventVariant23(class), - AssistantStreamEventVariant23Event(enum), - MessageDeltaObject(ref), - AssistantStreamEventVariant24(class), - AssistantStreamEventVariant24Event(enum), - MessageObject(ref), - AssistantStreamEventVariant25(class), - AssistantStreamEventVariant25Event(enum), - MessageObject(ref), - AssistantStreamEventDiscriminator(class), - AssistantStreamEventDiscriminatorEvent(enum), - ErrorEvent(class), - ErrorEventEvent(enum), - Error(ref), - DoneEvent(class), - DoneEventEvent(enum), - DoneEventData(enum), - Batch(class), + RealtimeConversationItemType(enum), + RealtimeConversationItemObject(enum), + RealtimeConversationItemStatus(enum), + RealtimeConversationItemRole(enum), + Content(array), + RealtimeConversationItemContentItem(class), + RealtimeConversationItemContentItemType(enum), + Text(string), + Audio(string), + Transcript(string), + CallId(string), + Name(string), + Arguments(string), + Output(string), + RealtimeResponse(class), Id(string), - BatchObject(enum), - Endpoint(string), - BatchErrors(class), + RealtimeResponseObject(enum), + RealtimeResponseStatus(enum), + RealtimeResponseStatusDetails(class), + RealtimeResponseStatusDetailsType(enum), + RealtimeResponseStatusDetailsReason(enum), + RealtimeResponseStatusDetailsError(class), + Type(string), + Code(string), + Output(array), + RealtimeConversationItem(ref), + RealtimeResponseUsage(class), + TotalTokens(int), + InputTokens(int), + OutputTokens(int), + RealtimeResponseUsageInputTokenDetails(class), + CachedTokens(int), + TextTokens(int), + AudioTokens(int), + RealtimeResponseUsageOutputTokenDetails(class), + TextTokens(int), + AudioTokens(int), + RealtimeServerEventConversationCreated(class), + 
EventId(string), + RealtimeServerEventConversationCreatedType(enum), + RealtimeServerEventConversationCreatedConversation(class), + Id(string), Object(string), - Data(array), - BatchErrorsDataItem(class), - Code(string), - Message(string), - Param(string), - Line(int), - InputFileId(string), - CompletionWindow(string), - BatchStatus(enum), - OutputFileId(string), - ErrorFileId(string), + RealtimeServerEventConversationItemCreated(class), + EventId(string), + RealtimeServerEventConversationItemCreatedType(enum), + PreviousItemId(string), + RealtimeConversationItem(ref), + RealtimeServerEventConversationItemDeleted(class), + EventId(string), + RealtimeServerEventConversationItemDeletedType(enum), + ItemId(string), + RealtimeServerEventConversationItemInputAudioTranscriptionCompleted(class), + EventId(string), + RealtimeServerEventConversationItemInputAudioTranscriptionCompletedType(enum), + ItemId(string), + ContentIndex(int), + Transcript(string), + RealtimeServerEventConversationItemInputAudioTranscriptionFailed(class), + EventId(string), + RealtimeServerEventConversationItemInputAudioTranscriptionFailedType(enum), + ItemId(string), + ContentIndex(int), + RealtimeServerEventConversationItemInputAudioTranscriptionFailedError(class), + Type(string), + Code(string), + Message(string), + Param(string), + RealtimeServerEventConversationItemTruncated(class), + EventId(string), + RealtimeServerEventConversationItemTruncatedType(enum), + ItemId(string), + ContentIndex(int), + AudioEndMs(int), + RealtimeServerEventError(class), + EventId(string), + RealtimeServerEventErrorType(enum), + RealtimeServerEventErrorError(class), + Type(string), + Code(string), + Message(string), + Param(string), + EventId(string), + RealtimeServerEventInputAudioBufferCleared(class), + EventId(string), + RealtimeServerEventInputAudioBufferClearedType(enum), + RealtimeServerEventInputAudioBufferCommitted(class), + EventId(string), + RealtimeServerEventInputAudioBufferCommittedType(enum), + PreviousItemId(string), + ItemId(string), + RealtimeServerEventInputAudioBufferSpeechStarted(class), + EventId(string), + RealtimeServerEventInputAudioBufferSpeechStartedType(enum), + AudioStartMs(int), + ItemId(string), + RealtimeServerEventInputAudioBufferSpeechStopped(class), + EventId(string), + RealtimeServerEventInputAudioBufferSpeechStoppedType(enum), + AudioEndMs(int), + ItemId(string), + RealtimeServerEventRateLimitsUpdated(class), + EventId(string), + RealtimeServerEventRateLimitsUpdatedType(enum), + RateLimits(array), + RealtimeServerEventRateLimitsUpdatedRateLimit(class), + Name(string), + Limit(int), + Remaining(int), + ResetSeconds(double), + RealtimeServerEventResponseAudioDelta(class), + EventId(string), + RealtimeServerEventResponseAudioDeltaType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + ContentIndex(int), + Delta(string), + RealtimeServerEventResponseAudioDone(class), + EventId(string), + RealtimeServerEventResponseAudioDoneType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + ContentIndex(int), + RealtimeServerEventResponseAudioTranscriptDelta(class), + EventId(string), + RealtimeServerEventResponseAudioTranscriptDeltaType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + ContentIndex(int), + Delta(string), + RealtimeServerEventResponseAudioTranscriptDone(class), + EventId(string), + RealtimeServerEventResponseAudioTranscriptDoneType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + ContentIndex(int), + Transcript(string), + 
RealtimeServerEventResponseContentPartAdded(class), + EventId(string), + RealtimeServerEventResponseContentPartAddedType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + ContentIndex(int), + RealtimeServerEventResponseContentPartAddedPart(class), + RealtimeServerEventResponseContentPartAddedPartType(enum), + Text(string), + Audio(string), + Transcript(string), + RealtimeServerEventResponseContentPartDone(class), + EventId(string), + RealtimeServerEventResponseContentPartDoneType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + ContentIndex(int), + RealtimeServerEventResponseContentPartDonePart(class), + Type(string), + Text(string), + Audio(string), + Transcript(string), + RealtimeServerEventResponseCreated(class), + EventId(string), + RealtimeServerEventResponseCreatedType(enum), + RealtimeResponse(ref), + RealtimeServerEventResponseDone(class), + EventId(string), + RealtimeServerEventResponseDoneType(enum), + RealtimeResponse(ref), + RealtimeServerEventResponseFunctionCallArgumentsDelta(class), + EventId(string), + RealtimeServerEventResponseFunctionCallArgumentsDeltaType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + CallId(string), + Delta(string), + RealtimeServerEventResponseFunctionCallArgumentsDone(class), + EventId(string), + RealtimeServerEventResponseFunctionCallArgumentsDoneType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + CallId(string), + Arguments(string), + RealtimeServerEventResponseOutputItemAdded(class), + EventId(string), + RealtimeServerEventResponseOutputItemAddedType(enum), + ResponseId(string), + OutputIndex(int), + RealtimeConversationItem(ref), + RealtimeServerEventResponseOutputItemDone(class), + EventId(string), + RealtimeServerEventResponseOutputItemDoneType(enum), + ResponseId(string), + OutputIndex(int), + RealtimeConversationItem(ref), + RealtimeServerEventResponseTextDelta(class), + EventId(string), + RealtimeServerEventResponseTextDeltaType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + ContentIndex(int), + Delta(string), + RealtimeServerEventResponseTextDone(class), + EventId(string), + RealtimeServerEventResponseTextDoneType(enum), + ResponseId(string), + ItemId(string), + OutputIndex(int), + ContentIndex(int), + Text(string), + RealtimeServerEventSessionCreated(class), + EventId(string), + RealtimeServerEventSessionCreatedType(enum), + RealtimeSession(ref), + RealtimeServerEventSessionUpdated(class), + EventId(string), + RealtimeServerEventSessionUpdatedType(enum), + RealtimeSession(ref), + RealtimeSession(class), + RealtimeSessionModalities(class), + RealtimeSessionModalitie(enum), + Instructions(string), + RealtimeSessionVoice(enum), + InputAudioFormat(string), + OutputAudioFormat(string), + RealtimeSessionInputAudioTranscription(class), + Model(string), + RealtimeSessionTurnDetection(class), + Type(string), + Threshold(double), + PrefixPaddingMs(int), + SilenceDurationMs(int), + Tools(array), + RealtimeSessionTool(class), + RealtimeSessionToolType(enum), + Name(string), + Description(string), + RealtimeSessionToolParameters(class), + ToolChoice(string), + Temperature(double), + MaxResponseOutputTokens(oneOf), + MaxResponseOutputTokensVariant1(int), + RealtimeSessionMaxResponseOutputTokens(enum), + ResponseFormatJsonObject(class), + ResponseFormatJsonObjectType(enum), + ResponseFormatJsonSchema(class), + ResponseFormatJsonSchemaType(enum), + ResponseFormatJsonSchemaJsonSchema(class), + Description(string), + Name(string), + 
ResponseFormatJsonSchemaSchema(ref), + Strict(bool), + ResponseFormatJsonSchemaSchema(class), + ResponseFormatText(class), + ResponseFormatTextType(enum), + RunCompletionUsage(class), + CompletionTokens(int), + PromptTokens(int), + TotalTokens(int), + RunObject(class), + Id(string), + RunObjectObject(enum), CreatedAt(int), - InProgressAt(int), + ThreadId(string), + AssistantId(string), + RunObjectStatus(enum), + RunObjectRequiredAction(class), + RunObjectRequiredActionType(enum), + RunObjectRequiredActionSubmitToolOutputs(class), + ToolCalls(array), + RunToolCallObject(ref), + RunObjectLastError(class), + RunObjectLastErrorCode(enum), + Message(string), ExpiresAt(int), - FinalizingAt(int), - CompletedAt(int), - FailedAt(int), - ExpiredAt(int), - CancellingAt(int), + StartedAt(int), CancelledAt(int), - BatchRequestCounts(class), - Total(int), - Completed(int), - Failed(int), - BatchMetadata(class), - BatchRequestInput(class), - CustomId(string), - BatchRequestInputMethod(enum), - Url(string), - BatchRequestOutput(class), + FailedAt(int), + CompletedAt(int), + RunObjectIncompleteDetails(class), + RunObjectIncompleteDetailsReason(enum), + Model(string), + Instructions(string), + Tools(array), + ToolsItem8(oneOf), + AssistantToolsCode(ref), + AssistantToolsFileSearch(ref), + AssistantToolsFunction(ref), + RunObjectToolDiscriminator(class), + RunObjectToolDiscriminatorType(enum), + RunObjectMetadata(class), + RunCompletionUsage(ref), + Temperature(double), + TopP(double), + MaxPromptTokens(int), + MaxCompletionTokens(int), + TruncationObject(ref), + AssistantsApiToolChoiceOption(ref), + ParallelToolCalls(ref), + AssistantsApiResponseFormatOption(ref), + RunStepCompletionUsage(class), + CompletionTokens(int), + PromptTokens(int), + TotalTokens(int), + RunStepDeltaObject(class), + Id(string), + RunStepDeltaObjectObject(enum), + RunStepDeltaObjectDelta(class), + RunStepDeltaObjectDeltaStepDetails(class), + RunStepDeltaStepDetailsMessageCreationObject(ref), + RunStepDeltaStepDetailsToolCallsObject(ref), + RunStepDeltaObjectDeltaStepDetailsDiscriminator(class), + RunStepDeltaObjectDeltaStepDetailsDiscriminatorType(enum), + RunStepDeltaStepDetailsMessageCreationObject(class), + RunStepDeltaStepDetailsMessageCreationObjectType(enum), + RunStepDeltaStepDetailsMessageCreationObjectMessageCreation(class), + MessageId(string), + RunStepDeltaStepDetailsToolCallsCodeObject(class), + Index(int), + Id(string), + RunStepDeltaStepDetailsToolCallsCodeObjectType(enum), + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreter(class), + Input(string), + Outputs(array), + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutput(class), + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject(ref), + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject(ref), + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator(class), + RunStepDeltaStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType(enum), + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject(class), + Index(int), + RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectType(enum), + RunStepDeltaStepDetailsToolCallsCodeOutputImageObjectImage(class), + FileId(string), + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject(class), + Index(int), + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjectType(enum), + Logs(string), + RunStepDeltaStepDetailsToolCallsFileSearchObject(class), + Index(int), + Id(string), + RunStepDeltaStepDetailsToolCallsFileSearchObjectType(enum), + 
RunStepDeltaStepDetailsToolCallsFileSearchObjectFileSearch(class), + RunStepDeltaStepDetailsToolCallsFunctionObject(class), + Index(int), Id(string), - CustomId(string), - BatchRequestOutputResponse(class), - StatusCode(int), - RequestId(string), - BatchRequestOutputResponseBody(class), - BatchRequestOutputError(class), - Code(string), - Message(string), - ListBatchesResponse(class), - Data(array), - Batch(ref), - FirstId(string), - LastId(string), - HasMore(bool), - ListBatchesResponseObject(enum), - AuditLogActorServiceAccount(class), + RunStepDeltaStepDetailsToolCallsFunctionObjectType(enum), + RunStepDeltaStepDetailsToolCallsFunctionObjectFunction(class), + Name(string), + Arguments(string), + Output(string), + RunStepDeltaStepDetailsToolCallsObject(class), + RunStepDeltaStepDetailsToolCallsObjectType(enum), + ToolCalls(array), + ToolCallsItem(oneOf), + RunStepDeltaStepDetailsToolCallsCodeObject(ref), + RunStepDeltaStepDetailsToolCallsFileSearchObject(ref), + RunStepDeltaStepDetailsToolCallsFunctionObject(ref), + RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminator(class), + RunStepDeltaStepDetailsToolCallsObjectToolCallDiscriminatorType(enum), + RunStepDetailsMessageCreationObject(class), + RunStepDetailsMessageCreationObjectType(enum), + RunStepDetailsMessageCreationObjectMessageCreation(class), + MessageId(string), + RunStepDetailsToolCallsCodeObject(class), Id(string), - AuditLogActorUser(class), + RunStepDetailsToolCallsCodeObjectType(enum), + RunStepDetailsToolCallsCodeObjectCodeInterpreter(class), + Input(string), + Outputs(array), + RunStepDetailsToolCallsCodeObjectCodeInterpreterOutput(class), + RunStepDetailsToolCallsCodeOutputLogsObject(ref), + RunStepDetailsToolCallsCodeOutputImageObject(ref), + RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminator(class), + RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputDiscriminatorType(enum), + RunStepDetailsToolCallsCodeOutputImageObject(class), + RunStepDetailsToolCallsCodeOutputImageObjectType(enum), + RunStepDetailsToolCallsCodeOutputImageObjectImage(class), + FileId(string), + RunStepDetailsToolCallsCodeOutputLogsObject(class), + RunStepDetailsToolCallsCodeOutputLogsObjectType(enum), + Logs(string), + RunStepDetailsToolCallsFileSearchObject(class), Id(string), - Email(string), - AuditLogActorApiKey(class), + RunStepDetailsToolCallsFileSearchObjectType(enum), + RunStepDetailsToolCallsFileSearchObjectFileSearch(class), + RunStepDetailsToolCallsFileSearchRankingOptionsObject(ref), + Results(array), + RunStepDetailsToolCallsFileSearchResultObject(ref), + RunStepDetailsToolCallsFileSearchRankingOptionsObject(class), + RunStepDetailsToolCallsFileSearchRankingOptionsObjectRanker(enum), + ScoreThreshold(double), + RunStepDetailsToolCallsFileSearchResultObject(class), + FileId(string), + FileName(string), + Score(double), + Content(array), + RunStepDetailsToolCallsFileSearchResultObjectContentItem(class), + RunStepDetailsToolCallsFileSearchResultObjectContentItemType(enum), + Text(string), + RunStepDetailsToolCallsFunctionObject(class), Id(string), - AuditLogActorApiKeyType(enum), - AuditLogActorUser(ref), - AuditLogActorServiceAccount(ref), - AuditLogActorSession(class), - AuditLogActorUser(ref), - IpAddress(string), - AuditLogActor(class), - AuditLogActorType(enum), - AuditLogActorSession(ref), - AuditLogActorApiKey(ref), - AuditLogEventType(enum), - AuditLog(class), + RunStepDetailsToolCallsFunctionObjectType(enum), + RunStepDetailsToolCallsFunctionObjectFunction(class), + Name(string), + Arguments(string), + 
Output(string), + RunStepDetailsToolCallsObject(class), + RunStepDetailsToolCallsObjectType(enum), + ToolCalls(array), + ToolCallsItem2(oneOf), + RunStepDetailsToolCallsCodeObject(ref), + RunStepDetailsToolCallsFileSearchObject(ref), + RunStepDetailsToolCallsFunctionObject(ref), + RunStepDetailsToolCallsObjectToolCallDiscriminator(class), + RunStepDetailsToolCallsObjectToolCallDiscriminatorType(enum), + RunStepObject(class), Id(string), - AuditLogEventType(ref), - EffectiveAt(int), - AuditLogProject(class), - Id(string), + RunStepObjectObject(enum), + CreatedAt(int), + AssistantId(string), + ThreadId(string), + RunId(string), + RunStepObjectType(enum), + RunStepObjectStatus(enum), + RunStepObjectStepDetails(class), + RunStepDetailsMessageCreationObject(ref), + RunStepDetailsToolCallsObject(ref), + RunStepObjectStepDetailsDiscriminator(class), + RunStepObjectStepDetailsDiscriminatorType(enum), + RunStepObjectLastError(class), + RunStepObjectLastErrorCode(enum), + Message(string), + ExpiredAt(int), + CancelledAt(int), + FailedAt(int), + CompletedAt(int), + RunStepObjectMetadata(class), + RunStepCompletionUsage(ref), + RunToolCallObject(class), + Id(string), + RunToolCallObjectType(enum), + RunToolCallObjectFunction(class), Name(string), - AuditLogActor(ref), - AuditLogApiKeyCreated(class), - Id(string), - AuditLogApiKeyCreatedData(class), - Scopes(array), - ScopesItem(string), - AuditLogApiKeyUpdated(class), - Id(string), - AuditLogApiKeyUpdatedChangesRequested(class), - Scopes(array), - ScopesItem(string), - AuditLogApiKeyDeleted(class), - Id(string), - AuditLogInviteSent(class), - Id(string), - AuditLogInviteSentData(class), - Email(string), - Role(string), - AuditLogInviteAccepted(class), - Id(string), - AuditLogInviteDeleted(class), - Id(string), - AuditLogLoginFailed(class), - ErrorCode(string), - ErrorMessage(string), - AuditLogLogoutFailed(class), - ErrorCode(string), - ErrorMessage(string), - AuditLogOrganizationUpdated(class), - Id(string), - AuditLogOrganizationUpdatedChangesRequested(class), - Title(string), - Description(string), - Name(string), - AuditLogOrganizationUpdatedChangesRequestedSettings(class), - ThreadsUiVisibility(string), - UsageDashboardVisibility(string), - AuditLogProjectCreated(class), - Id(string), - AuditLogProjectCreatedData(class), - Name(string), - Title(string), - AuditLogProjectUpdated(class), - Id(string), - AuditLogProjectUpdatedChangesRequested(class), - Title(string), - AuditLogProjectArchived(class), - Id(string), - AuditLogServiceAccountCreated(class), - Id(string), - AuditLogServiceAccountCreatedData(class), - Role(string), - AuditLogServiceAccountUpdated(class), - Id(string), - AuditLogServiceAccountUpdatedChangesRequested(class), - Role(string), - AuditLogServiceAccountDeleted(class), - Id(string), - AuditLogUserAdded(class), - Id(string), - AuditLogUserAddedData(class), - Role(string), - AuditLogUserUpdated(class), - Id(string), - AuditLogUserUpdatedChangesRequested(class), - Role(string), - AuditLogUserDeleted(class), - Id(string), - ListAuditLogsResponse(class), - ListAuditLogsResponseObject(enum), - Data(array), - AuditLog(ref), - FirstId(string), - LastId(string), - HasMore(bool), - Invite(class), - InviteObject(enum), + Arguments(string), + StaticChunkingStrategy(class), + MaxChunkSizeTokens(int), + ChunkOverlapTokens(int), + StaticChunkingStrategyRequestParam(class), + StaticChunkingStrategyRequestParamType(enum), + StaticChunkingStrategy(ref), + StaticChunkingStrategyResponseParam(class), + StaticChunkingStrategyResponseParamType(enum), 
+ StaticChunkingStrategy(ref), + SubmitToolOutputsRunRequest(class), + ToolOutputs(array), + SubmitToolOutputsRunRequestToolOutput(class), + ToolCallId(string), + Output(string), + Stream(bool), + ThreadObject(class), Id(string), - Email(string), - InviteRole(enum), - InviteStatus(enum), - InvitedAt(int), + ThreadObjectObject(enum), + CreatedAt(int), + ThreadObjectToolResources(class), + ThreadObjectToolResourcesCodeInterpreter(class), + FileIds(array), + FileIdsItem(string), + ThreadObjectToolResourcesFileSearch(class), + VectorStoreIds(array), + VectorStoreIdsItem(string), + ThreadObjectMetadata(class), + TranscriptionSegment(class), + Id(int), + Seek(int), + Start(float), + End(float), + Text(string), + Tokens(array), + TokensItem(int), + Temperature(float), + AvgLogprob(float), + CompressionRatio(float), + NoSpeechProb(float), + TranscriptionWord(class), + Word(string), + Start(float), + End(float), + TruncationObject(class), + TruncationObjectType(enum), + LastMessages(int), + UpdateVectorStoreRequest(class), + Name(string), + VectorStoreExpirationAfter(ref), + UpdateVectorStoreRequestMetadata(class), + Upload(class), + Id(string), + CreatedAt(int), + Filename(string), + Bytes(int), + Purpose(string), + UploadStatus(enum), ExpiresAt(int), - AcceptedAt(int), - InviteListResponse(class), - InviteListResponseObject(enum), + UploadObject(enum), + OpenAIFile(ref), + UploadPart(class), + Id(string), + CreatedAt(int), + UploadId(string), + UploadPartObject(enum), + UsageAudioSpeechesResult(class), + UsageAudioSpeechesResultObject(enum), + Characters(int), + NumModelRequests(int), + ProjectId(string), + UserId(string), + ApiKeyId(string), + Model(string), + UsageAudioTranscriptionsResult(class), + UsageAudioTranscriptionsResultObject(enum), + Seconds(int), + NumModelRequests(int), + ProjectId(string), + UserId(string), + ApiKeyId(string), + Model(string), + UsageCodeInterpreterSessionsResult(class), + UsageCodeInterpreterSessionsResultObject(enum), + Sessions(int), + ProjectId(string), + UsageCompletionsResult(class), + UsageCompletionsResultObject(enum), + InputTokens(int), + InputCachedTokens(int), + OutputTokens(int), + NumModelRequests(int), + ProjectId(string), + UserId(string), + ApiKeyId(string), + Model(string), + Batch(bool), + UsageEmbeddingsResult(class), + UsageEmbeddingsResultObject(enum), + InputTokens(int), + NumModelRequests(int), + ProjectId(string), + UserId(string), + ApiKeyId(string), + Model(string), + UsageImagesResult(class), + UsageImagesResultObject(enum), + Images(int), + NumModelRequests(int), + Source(string), + Size(string), + ProjectId(string), + UserId(string), + ApiKeyId(string), + Model(string), + UsageModerationsResult(class), + UsageModerationsResultObject(enum), + InputTokens(int), + NumModelRequests(int), + ProjectId(string), + UserId(string), + ApiKeyId(string), + Model(string), + UsageResponse(class), + UsageResponseObject(enum), Data(array), - Invite(ref), - FirstId(string), - LastId(string), + UsageTimeBucket(ref), HasMore(bool), - InviteRequest(class), - Email(string), - InviteRequestRole(enum), - InviteDeleteResponse(class), - InviteDeleteResponseObject(enum), - Id(string), - Deleted(bool), + NextPage(string), + UsageTimeBucket(class), + UsageTimeBucketObject(enum), + StartTime(int), + EndTime(int), + Result(array), + ResultItem(oneOf), + UsageCompletionsResult(ref), + UsageEmbeddingsResult(ref), + UsageModerationsResult(ref), + UsageImagesResult(ref), + UsageAudioSpeechesResult(ref), + UsageAudioTranscriptionsResult(ref), + 
UsageVectorStoresResult(ref), + UsageCodeInterpreterSessionsResult(ref), + CostsResult(ref), + UsageTimeBucketResultItemDiscriminator(class), + UsageTimeBucketResultItemDiscriminatorObject(enum), + UsageVectorStoresResult(class), + UsageVectorStoresResultObject(enum), + UsageBytes(int), + ProjectId(string), User(class), UserObject(enum), Id(string), @@ -1632,6 +2200,10 @@ Email(string), UserRole(enum), AddedAt(int), + UserDeleteResponse(class), + UserDeleteResponseObject(enum), + Id(string), + Deleted(bool), UserListResponse(class), UserListResponseObject(enum), Data(array), @@ -1641,175 +2213,312 @@ HasMore(bool), UserRoleUpdateRequest(class), UserRoleUpdateRequestRole(enum), - UserDeleteResponse(class), - UserDeleteResponseObject(enum), - Id(string), - Deleted(bool), - Project(class), + VectorStoreExpirationAfter(class), + VectorStoreExpirationAfterAnchor(enum), + Days(int), + VectorStoreFileBatchObject(class), Id(string), - ProjectObject(enum), - Name(string), + VectorStoreFileBatchObjectObject(enum), CreatedAt(int), - ArchivedAt(int), - ProjectStatus(enum), - ProjectListResponse(class), - ProjectListResponseObject(enum), - Data(array), - Project(ref), - FirstId(string), - LastId(string), - HasMore(bool), - ProjectCreateRequest(class), - Name(string), - ProjectUpdateRequest(class), - Name(string), - DefaultProjectErrorResponse(class), - Code(int), - Message(string), - ProjectUser(class), - ProjectUserObject(enum), - Id(string), - Name(string), - Email(string), - ProjectUserRole(enum), - AddedAt(int), - ProjectUserListResponse(class), - Object(string), - Data(array), - ProjectUser(ref), - FirstId(string), - LastId(string), - HasMore(bool), - ProjectUserCreateRequest(class), - UserId(string), - ProjectUserCreateRequestRole(enum), - ProjectUserUpdateRequest(class), - ProjectUserUpdateRequestRole(enum), - ProjectUserDeleteResponse(class), - ProjectUserDeleteResponseObject(enum), - Id(string), - Deleted(bool), - ProjectServiceAccount(class), - ProjectServiceAccountObject(enum), + VectorStoreId(string), + VectorStoreFileBatchObjectStatus(enum), + VectorStoreFileBatchObjectFileCounts(class), + InProgress(int), + Completed(int), + Failed(int), + Cancelled(int), + Total(int), + VectorStoreFileObject(class), Id(string), - Name(string), - ProjectServiceAccountRole(enum), + VectorStoreFileObjectObject(enum), + UsageBytes(int), CreatedAt(int), - ProjectServiceAccountListResponse(class), - ProjectServiceAccountListResponseObject(enum), - Data(array), - ProjectServiceAccount(ref), - FirstId(string), - LastId(string), - HasMore(bool), - ProjectServiceAccountCreateRequest(class), - Name(string), - ProjectServiceAccountCreateResponse(class), - ProjectServiceAccountCreateResponseObject(enum), + VectorStoreId(string), + VectorStoreFileObjectStatus(enum), + VectorStoreFileObjectLastError(class), + VectorStoreFileObjectLastErrorCode(enum), + Message(string), + VectorStoreFileObjectChunkingStrategy(class), + StaticChunkingStrategyResponseParam(ref), + OtherChunkingStrategyResponseParam(ref), + VectorStoreFileObjectChunkingStrategyDiscriminator(class), + VectorStoreFileObjectChunkingStrategyDiscriminatorType(enum), + VectorStoreObject(class), Id(string), - Name(string), - ProjectServiceAccountCreateResponseRole(enum), - CreatedAt(int), - ProjectServiceAccountApiKey(ref), - ProjectServiceAccountApiKey(class), - ProjectServiceAccountApiKeyObject(enum), - Value(string), - Name(string), + VectorStoreObjectObject(enum), CreatedAt(int), - Id(string), - ProjectServiceAccountDeleteResponse(class), - 
ProjectServiceAccountDeleteResponseObject(enum), - Id(string), - Deleted(bool), - ProjectApiKey(class), - ProjectApiKeyObject(enum), - RedactedValue(string), Name(string), - CreatedAt(int), - Id(string), - ProjectApiKeyOwner(class), - ProjectApiKeyOwnerType(enum), - ProjectUser(ref), - ProjectServiceAccount(ref), - ProjectApiKeyListResponse(class), - ProjectApiKeyListResponseObject(enum), - Data(array), - ProjectApiKey(ref), - FirstId(string), - LastId(string), - HasMore(bool), - ProjectApiKeyDeleteResponse(class), - ProjectApiKeyDeleteResponseObject(enum), - Id(string), - Deleted(bool), - CreateChatCompletionRequest(ref), - CreateCompletionRequest(ref), - CreateImageRequest(ref), - CreateImageEditRequest(ref), - CreateImageVariationRequest(ref), - CreateEmbeddingRequest(ref), + UsageBytes(int), + VectorStoreObjectFileCounts(class), + InProgress(int), + Completed(int), + Failed(int), + Cancelled(int), + Total(int), + VectorStoreObjectStatus(enum), + VectorStoreExpirationAfter(ref), + ExpiresAt(int), + LastActiveAt(int), + VectorStoreObjectMetadata(class), + CreateAssistantRequest(ref), + ModifyAssistantRequest(ref), CreateSpeechRequest(ref), CreateTranscriptionRequest(ref), CreateTranslationRequest(ref), + CreateBatchRequest(class), + InputFileId(string), + CreateBatchRequestEndpoint(enum), + CreateBatchRequestCompletionWindow(enum), + CreateBatchRequestMetadata(class), + Metadata(string), + CreateChatCompletionRequest(ref), + CreateCompletionRequest(ref), + CreateEmbeddingRequest(ref), CreateFileRequest(ref), - CreateUploadRequest(ref), - AddUploadPartRequest(ref), - CompleteUploadRequest(ref), CreateFineTuningJobRequest(ref), + CreateImageEditRequest(ref), + CreateImageRequest(ref), + CreateImageVariationRequest(ref), CreateModerationRequest(ref), - CreateAssistantRequest(ref), - ModifyAssistantRequest(ref), + InviteRequest(ref), + ProjectCreateRequest(ref), + ProjectUpdateRequest(ref), + ProjectRateLimitUpdateRequest(ref), + ProjectServiceAccountCreateRequest(ref), + ProjectUserCreateRequest(ref), + ProjectUserUpdateRequest(ref), + UserRoleUpdateRequest(ref), CreateThreadRequest(ref), + CreateThreadAndRunRequest(ref), ModifyThreadRequest(ref), CreateMessageRequest(ref), ModifyMessageRequest(ref), - CreateThreadAndRunRequest(ref), CreateRunRequest(ref), ModifyRunRequest(ref), SubmitToolOutputsRunRequest(ref), + CreateUploadRequest(ref), + CompleteUploadRequest(ref), + AddUploadPartRequest(ref), CreateVectorStoreRequest(ref), UpdateVectorStoreRequest(ref), - CreateVectorStoreFileRequest(ref), CreateVectorStoreFileBatchRequest(ref), - CreateBatchRequest(class), - InputFileId(string), - CreateBatchRequestEndpoint(enum), - CreateBatchRequestCompletionWindow(enum), - CreateBatchRequestMetadata(class), - Metadata(string), - InviteRequest(ref), - UserRoleUpdateRequest(ref), - ProjectCreateRequest(ref), - ProjectUpdateRequest(ref), - ProjectUserCreateRequest(ref), - ProjectUserUpdateRequest(ref), - ProjectServiceAccountCreateRequest(ref), + CreateVectorStoreFileRequest(ref), + ListAssistantsLimit(int), + ListAssistantsOrder(enum), + ListAssistantsAfter(string), + ListAssistantsBefore(string), + GetAssistantAssistantId(string), + ModifyAssistantAssistantId(string), + DeleteAssistantAssistantId(string), + ListBatchesAfter(string), + ListBatchesLimit(int), + RetrieveBatchBatchId(string), + CancelBatchBatchId(string), ListFilesPurpose(string), + ListFilesLimit(int), + ListFilesOrder(enum), + ListFilesAfter(string), DeleteFileFileId(string), RetrieveFileFileId(string), DownloadFileFileId(string), - 
AddUploadPartUploadId(string), - CompleteUploadUploadId(string), - CancelUploadUploadId(string), ListPaginatedFineTuningJobsAfter(string), ListPaginatedFineTuningJobsLimit(int), RetrieveFineTuningJobFineTuningJobId(string), - ListFineTuningEventsFineTuningJobId(string), - ListFineTuningEventsAfter(string), - ListFineTuningEventsLimit(int), CancelFineTuningJobFineTuningJobId(string), ListFineTuningJobCheckpointsFineTuningJobId(string), ListFineTuningJobCheckpointsAfter(string), ListFineTuningJobCheckpointsLimit(int), + ListFineTuningEventsFineTuningJobId(string), + ListFineTuningEventsAfter(string), + ListFineTuningEventsLimit(int), RetrieveModelModel(string), DeleteModelModel(string), - ListAssistantsLimit(int), - ListAssistantsOrder(enum), - ListAssistantsAfter(string), - ListAssistantsBefore(string), - GetAssistantAssistantId(string), - ModifyAssistantAssistantId(string), - DeleteAssistantAssistantId(string), + ListAuditLogsEffectiveAt(class), + Gt(int), + Gte(int), + Lt(int), + Lte(int), + ListAuditLogsProjectIds(array), + ListAuditLogsProjectIdsItem(string), + ListAuditLogsEventTypes(array), + AuditLogEventType(ref), + ListAuditLogsActorIds(array), + ListAuditLogsActorIdsItem(string), + ListAuditLogsActorEmails(array), + ListAuditLogsActorEmailsItem(string), + ListAuditLogsResourceIds(array), + ListAuditLogsResourceIdsItem(string), + ListAuditLogsLimit(int), + ListAuditLogsAfter(string), + ListAuditLogsBefore(string), + UsageCostsStartTime(int), + UsageCostsEndTime(int), + UsageCostsBucketWidth(enum), + UsageCostsProjectIds(array), + UsageCostsProjectIdsItem(string), + UsageCostsGroupBy(array), + UsageCostsGroupByItem(enum), + UsageCostsLimit(int), + UsageCostsPage(string), + ListInvitesLimit(int), + ListInvitesAfter(string), + RetrieveInviteInviteId(string), + DeleteInviteInviteId(string), + ListProjectsLimit(int), + ListProjectsAfter(string), + ListProjectsIncludeArchived(bool), + RetrieveProjectProjectId(string), + ModifyProjectProjectId(string), + ListProjectApiKeysProjectId(string), + ListProjectApiKeysLimit(int), + ListProjectApiKeysAfter(string), + RetrieveProjectApiKeyProjectId(string), + RetrieveProjectApiKeyKeyId(string), + DeleteProjectApiKeyProjectId(string), + DeleteProjectApiKeyKeyId(string), + ArchiveProjectProjectId(string), + ListProjectRateLimitsProjectId(string), + ListProjectRateLimitsLimit(int), + ListProjectRateLimitsAfter(string), + ListProjectRateLimitsBefore(string), + UpdateProjectRateLimitsProjectId(string), + UpdateProjectRateLimitsRateLimitId(string), + ListProjectServiceAccountsProjectId(string), + ListProjectServiceAccountsLimit(int), + ListProjectServiceAccountsAfter(string), + CreateProjectServiceAccountProjectId(string), + RetrieveProjectServiceAccountProjectId(string), + RetrieveProjectServiceAccountServiceAccountId(string), + DeleteProjectServiceAccountProjectId(string), + DeleteProjectServiceAccountServiceAccountId(string), + ListProjectUsersProjectId(string), + ListProjectUsersLimit(int), + ListProjectUsersAfter(string), + CreateProjectUserProjectId(string), + RetrieveProjectUserProjectId(string), + RetrieveProjectUserUserId(string), + ModifyProjectUserProjectId(string), + ModifyProjectUserUserId(string), + DeleteProjectUserProjectId(string), + DeleteProjectUserUserId(string), + UsageAudioSpeechesStartTime(int), + UsageAudioSpeechesEndTime(int), + UsageAudioSpeechesBucketWidth(enum), + UsageAudioSpeechesProjectIds(array), + UsageAudioSpeechesProjectIdsItem(string), + UsageAudioSpeechesUserIds(array), + UsageAudioSpeechesUserIdsItem(string), + 
UsageAudioSpeechesApiKeyIds(array), + UsageAudioSpeechesApiKeyIdsItem(string), + UsageAudioSpeechesModels(array), + UsageAudioSpeechesModelsItem(string), + UsageAudioSpeechesGroupBy(array), + UsageAudioSpeechesGroupByItem(enum), + UsageAudioSpeechesLimit(int), + UsageAudioSpeechesPage(string), + UsageAudioTranscriptionsStartTime(int), + UsageAudioTranscriptionsEndTime(int), + UsageAudioTranscriptionsBucketWidth(enum), + UsageAudioTranscriptionsProjectIds(array), + UsageAudioTranscriptionsProjectIdsItem(string), + UsageAudioTranscriptionsUserIds(array), + UsageAudioTranscriptionsUserIdsItem(string), + UsageAudioTranscriptionsApiKeyIds(array), + UsageAudioTranscriptionsApiKeyIdsItem(string), + UsageAudioTranscriptionsModels(array), + UsageAudioTranscriptionsModelsItem(string), + UsageAudioTranscriptionsGroupBy(array), + UsageAudioTranscriptionsGroupByItem(enum), + UsageAudioTranscriptionsLimit(int), + UsageAudioTranscriptionsPage(string), + UsageCodeInterpreterSessionsStartTime(int), + UsageCodeInterpreterSessionsEndTime(int), + UsageCodeInterpreterSessionsBucketWidth(enum), + UsageCodeInterpreterSessionsProjectIds(array), + UsageCodeInterpreterSessionsProjectIdsItem(string), + UsageCodeInterpreterSessionsGroupBy(array), + UsageCodeInterpreterSessionsGroupByItem(enum), + UsageCodeInterpreterSessionsLimit(int), + UsageCodeInterpreterSessionsPage(string), + UsageCompletionsStartTime(int), + UsageCompletionsEndTime(int), + UsageCompletionsBucketWidth(enum), + UsageCompletionsProjectIds(array), + UsageCompletionsProjectIdsItem(string), + UsageCompletionsUserIds(array), + UsageCompletionsUserIdsItem(string), + UsageCompletionsApiKeyIds(array), + UsageCompletionsApiKeyIdsItem(string), + UsageCompletionsModels(array), + UsageCompletionsModelsItem(string), + UsageCompletionsBatch(bool), + UsageCompletionsGroupBy(array), + UsageCompletionsGroupByItem(enum), + UsageCompletionsLimit(int), + UsageCompletionsPage(string), + UsageEmbeddingsStartTime(int), + UsageEmbeddingsEndTime(int), + UsageEmbeddingsBucketWidth(enum), + UsageEmbeddingsProjectIds(array), + UsageEmbeddingsProjectIdsItem(string), + UsageEmbeddingsUserIds(array), + UsageEmbeddingsUserIdsItem(string), + UsageEmbeddingsApiKeyIds(array), + UsageEmbeddingsApiKeyIdsItem(string), + UsageEmbeddingsModels(array), + UsageEmbeddingsModelsItem(string), + UsageEmbeddingsGroupBy(array), + UsageEmbeddingsGroupByItem(enum), + UsageEmbeddingsLimit(int), + UsageEmbeddingsPage(string), + UsageImagesStartTime(int), + UsageImagesEndTime(int), + UsageImagesBucketWidth(enum), + UsageImagesSources(array), + UsageImagesSource(enum), + UsageImagesSizes(array), + UsageImagesSize(enum), + UsageImagesProjectIds(array), + UsageImagesProjectIdsItem(string), + UsageImagesUserIds(array), + UsageImagesUserIdsItem(string), + UsageImagesApiKeyIds(array), + UsageImagesApiKeyIdsItem(string), + UsageImagesModels(array), + UsageImagesModelsItem(string), + UsageImagesGroupBy(array), + UsageImagesGroupByItem(enum), + UsageImagesLimit(int), + UsageImagesPage(string), + UsageModerationsStartTime(int), + UsageModerationsEndTime(int), + UsageModerationsBucketWidth(enum), + UsageModerationsProjectIds(array), + UsageModerationsProjectIdsItem(string), + UsageModerationsUserIds(array), + UsageModerationsUserIdsItem(string), + UsageModerationsApiKeyIds(array), + UsageModerationsApiKeyIdsItem(string), + UsageModerationsModels(array), + UsageModerationsModelsItem(string), + UsageModerationsGroupBy(array), + UsageModerationsGroupByItem(enum), + UsageModerationsLimit(int), + 
UsageModerationsPage(string), + UsageVectorStoresStartTime(int), + UsageVectorStoresEndTime(int), + UsageVectorStoresBucketWidth(enum), + UsageVectorStoresProjectIds(array), + UsageVectorStoresProjectIdsItem(string), + UsageVectorStoresGroupBy(array), + UsageVectorStoresGroupByItem(enum), + UsageVectorStoresLimit(int), + UsageVectorStoresPage(string), + ListUsersLimit(int), + ListUsersAfter(string), + RetrieveUserUserId(string), + ModifyUserUserId(string), + DeleteUserUserId(string), GetThreadThreadId(string), ModifyThreadThreadId(string), DeleteThreadThreadId(string), @@ -1832,12 +2541,12 @@ ListRunsAfter(string), ListRunsBefore(string), CreateRunThreadId(string), + CreateRunInclude(array), + CreateRunIncludeItem(enum), GetRunThreadId(string), GetRunRunId(string), ModifyRunThreadId(string), ModifyRunRunId(string), - SubmitToolOuputsToRunThreadId(string), - SubmitToolOuputsToRunRunId(string), CancelRunThreadId(string), CancelRunRunId(string), ListRunStepsThreadId(string), @@ -1846,9 +2555,18 @@ ListRunStepsOrder(enum), ListRunStepsAfter(string), ListRunStepsBefore(string), + ListRunStepsInclude(array), + ListRunStepsIncludeItem(enum), GetRunStepThreadId(string), GetRunStepRunId(string), GetRunStepStepId(string), + GetRunStepInclude(array), + GetRunStepIncludeItem(enum), + SubmitToolOuputsToRunThreadId(string), + SubmitToolOuputsToRunRunId(string), + CancelUploadUploadId(string), + CompleteUploadUploadId(string), + AddUploadPartUploadId(string), ListVectorStoresLimit(int), ListVectorStoresOrder(enum), ListVectorStoresAfter(string), @@ -1856,17 +2574,6 @@ GetVectorStoreVectorStoreId(string), ModifyVectorStoreVectorStoreId(string), DeleteVectorStoreVectorStoreId(string), - ListVectorStoreFilesVectorStoreId(string), - ListVectorStoreFilesLimit(int), - ListVectorStoreFilesOrder(enum), - ListVectorStoreFilesAfter(string), - ListVectorStoreFilesBefore(string), - ListVectorStoreFilesFilter(enum), - CreateVectorStoreFileVectorStoreId(string), - GetVectorStoreFileVectorStoreId(string), - GetVectorStoreFileFileId(string), - DeleteVectorStoreFileVectorStoreId(string), - DeleteVectorStoreFileFileId(string), CreateVectorStoreFileBatchVectorStoreId(string), GetVectorStoreFileBatchVectorStoreId(string), GetVectorStoreFileBatchBatchId(string), @@ -1879,70 +2586,22 @@ ListFilesInVectorStoreBatchAfter(string), ListFilesInVectorStoreBatchBefore(string), ListFilesInVectorStoreBatchFilter(enum), - ListBatchesAfter(string), - ListBatchesLimit(int), - RetrieveBatchBatchId(string), - CancelBatchBatchId(string), - ListAuditLogsEffectiveAt(class), - Gt(int), - Gte(int), - Lt(int), - Lte(int), - ListAuditLogsProjectIds(array), - ListAuditLogsProjectIdsItem(string), - ListAuditLogsEventTypes(array), - AuditLogEventType(ref), - ListAuditLogsActorIds(array), - ListAuditLogsActorIdsItem(string), - ListAuditLogsActorEmails(array), - ListAuditLogsActorEmailsItem(string), - ListAuditLogsResourceIds(array), - ListAuditLogsResourceIdsItem(string), - ListAuditLogsLimit(int), - ListAuditLogsAfter(string), - ListAuditLogsBefore(string), - ListInvitesLimit(int), - ListInvitesAfter(string), - RetrieveInviteInviteId(string), - DeleteInviteInviteId(string), - ListUsersLimit(int), - ListUsersAfter(string), - RetrieveUserUserId(string), - DeleteUserUserId(string), - ListProjectsLimit(int), - ListProjectsAfter(string), - ListProjectsIncludeArchived(bool), - RetrieveProjectProjectId(string), - ArchiveProjectProjectId(string), - ListProjectUsersProjectId(string), - ListProjectUsersLimit(int), - ListProjectUsersAfter(string), - 
CreateProjectUserProjectId(string), - RetrieveProjectUserProjectId(string), - RetrieveProjectUserUserId(string), - DeleteProjectUserProjectId(string), - DeleteProjectUserUserId(string), - ListProjectServiceAccountsProjectId(string), - ListProjectServiceAccountsLimit(int), - ListProjectServiceAccountsAfter(string), - CreateProjectServiceAccountProjectId(string), - RetrieveProjectServiceAccountProjectId(string), - RetrieveProjectServiceAccountServiceAccountId(string), - DeleteProjectServiceAccountProjectId(string), - DeleteProjectServiceAccountServiceAccountId(string), - ListProjectApiKeysProjectId(string), - ListProjectApiKeysLimit(int), - ListProjectApiKeysAfter(string), - RetrieveProjectApiKeyProjectId(string), - RetrieveProjectApiKeyKeyId(string), - DeleteProjectApiKeyProjectId(string), - DeleteProjectApiKeyKeyId(string), - CreateChatCompletionResponse(ref), - CreateCompletionResponse(ref), - ImagesResponse(ref), - ImagesResponse(ref), - ImagesResponse(ref), - CreateEmbeddingResponse(ref), + ListVectorStoreFilesVectorStoreId(string), + ListVectorStoreFilesLimit(int), + ListVectorStoreFilesOrder(enum), + ListVectorStoreFilesAfter(string), + ListVectorStoreFilesBefore(string), + ListVectorStoreFilesFilter(enum), + CreateVectorStoreFileVectorStoreId(string), + GetVectorStoreFileVectorStoreId(string), + GetVectorStoreFileFileId(string), + DeleteVectorStoreFileVectorStoreId(string), + DeleteVectorStoreFileFileId(string), + ListAssistantsResponse(ref), + AssistantObject(ref), + AssistantObject(ref), + AssistantObject(ref), + DeleteAssistantResponse(ref), CreateSpeechResponse(byte[]), CreateTranscriptionResponse(oneOf), CreateTranscriptionResponseJson(ref), @@ -1950,31 +2609,79 @@ CreateTranslationResponse(oneOf), CreateTranslationResponseJson(ref), CreateTranslationResponseVerboseJson(ref), + Batch(ref), + ListBatchesResponse(ref), + Batch(ref), + Batch(ref), + CreateChatCompletionResponse(ref), + CreateCompletionResponse(ref), + CreateEmbeddingResponse(ref), ListFilesResponse(ref), OpenAIFile(ref), DeleteFileResponse(ref), OpenAIFile(ref), DownloadFileResponse(string), - Upload(ref), - UploadPart(ref), - Upload(ref), - Upload(ref), FineTuningJob(ref), ListPaginatedFineTuningJobsResponse(ref), FineTuningJob(ref), - ListFineTuningJobEventsResponse(ref), FineTuningJob(ref), ListFineTuningJobCheckpointsResponse(ref), + ListFineTuningJobEventsResponse(ref), + ImagesResponse(ref), + ImagesResponse(ref), + ImagesResponse(ref), ListModelsResponse(ref), - Model12(ref), + Model15(ref), DeleteModelResponse(ref), CreateModerationResponse(ref), - ListAssistantsResponse(ref), - AssistantObject(ref), - AssistantObject(ref), - AssistantObject(ref), - DeleteAssistantResponse(ref), + ListAuditLogsResponse(ref), + UsageResponse(ref), + InviteListResponse(ref), + Invite(ref), + Invite(ref), + InviteDeleteResponse(ref), + ProjectListResponse(ref), + Project(ref), + Project(ref), + Project(ref), + ErrorResponse(ref), + ProjectApiKeyListResponse(ref), + ProjectApiKey(ref), + ProjectApiKeyDeleteResponse(ref), + ErrorResponse(ref), + Project(ref), + ProjectRateLimitListResponse(ref), + ProjectRateLimit(ref), + ErrorResponse(ref), + ProjectServiceAccountListResponse(ref), + ErrorResponse(ref), + ProjectServiceAccountCreateResponse(ref), + ErrorResponse(ref), + ProjectServiceAccount(ref), + ProjectServiceAccountDeleteResponse(ref), + ProjectUserListResponse(ref), + ErrorResponse(ref), + ProjectUser(ref), + ErrorResponse(ref), + ProjectUser(ref), + ProjectUser(ref), + ErrorResponse(ref), + 
ProjectUserDeleteResponse(ref), + ErrorResponse(ref), + UsageResponse(ref), + UsageResponse(ref), + UsageResponse(ref), + UsageResponse(ref), + UsageResponse(ref), + UsageResponse(ref), + UsageResponse(ref), + UsageResponse(ref), + UserListResponse(ref), + User(ref), + User(ref), + UserDeleteResponse(ref), ThreadObject(ref), + RunObject(ref), ThreadObject(ref), ThreadObject(ref), DeleteThreadResponse(ref), @@ -1983,64 +2690,29 @@ MessageObject(ref), MessageObject(ref), DeleteMessageResponse(ref), - RunObject(ref), ListRunsResponse(ref), RunObject(ref), RunObject(ref), RunObject(ref), RunObject(ref), - RunObject(ref), ListRunStepsResponse(ref), RunStepObject(ref), + RunObject(ref), + Upload(ref), + Upload(ref), + Upload(ref), + UploadPart(ref), ListVectorStoresResponse(ref), VectorStoreObject(ref), VectorStoreObject(ref), VectorStoreObject(ref), DeleteVectorStoreResponse(ref), - ListVectorStoreFilesResponse(ref), - VectorStoreFileObject(ref), - VectorStoreFileObject(ref), - DeleteVectorStoreFileResponse(ref), VectorStoreFileBatchObject(ref), VectorStoreFileBatchObject(ref), VectorStoreFileBatchObject(ref), ListVectorStoreFilesResponse(ref), - Batch(ref), - ListBatchesResponse(ref), - Batch(ref), - Batch(ref), - ListAuditLogsResponse(ref), - InviteListResponse(ref), - Invite(ref), - Invite(ref), - InviteDeleteResponse(ref), - UserListResponse(ref), - User(ref), - User(ref), - UserDeleteResponse(ref), - ProjectListResponse(ref), - Project(ref), - Project(ref), - Project(ref), - ErrorResponse(ref), - Project(ref), - ProjectUserListResponse(ref), - ErrorResponse(ref), - ProjectUser(ref), - ErrorResponse(ref), - ProjectUser(ref), - ProjectUser(ref), - ErrorResponse(ref), - ProjectUserDeleteResponse(ref), - ErrorResponse(ref), - ProjectServiceAccountListResponse(ref), - ErrorResponse(ref), - ProjectServiceAccountCreateResponse(ref), - ErrorResponse(ref), - ProjectServiceAccount(ref), - ProjectServiceAccountDeleteResponse(ref), - ProjectApiKeyListResponse(ref), - ProjectApiKey(ref), - ProjectApiKeyDeleteResponse(ref), - ErrorResponse(ref) + ListVectorStoreFilesResponse(ref), + VectorStoreFileObject(ref), + VectorStoreFileObject(ref), + DeleteVectorStoreFileResponse(ref) ] \ No newline at end of file diff --git a/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/ProcessingTests.ComputeDiscriminators_NewWarnings.verified.txt b/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/ProcessingTests.ComputeDiscriminators_NewWarnings.verified.txt index cc16fe346a..ad47dbb93f 100644 --- a/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/ProcessingTests.ComputeDiscriminators_NewWarnings.verified.txt +++ b/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/ProcessingTests.ComputeDiscriminators_NewWarnings.verified.txt @@ -1,7 +1 @@ -[ - { - RuleName: SchemaMismatchedDataType, - Message: Data and type mismatch found., - Pointer: #/components/schemas/CreateCompletionRequest/properties/logprobs/default - } -] \ No newline at end of file +[] \ No newline at end of file diff --git a/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/ProcessingTests.ComputeDiscriminators_OriginalWarnings.verified.txt b/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/ProcessingTests.ComputeDiscriminators_OriginalWarnings.verified.txt index cc16fe346a..ad47dbb93f 100644 --- 
a/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/ProcessingTests.ComputeDiscriminators_OriginalWarnings.verified.txt +++ b/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/ProcessingTests.ComputeDiscriminators_OriginalWarnings.verified.txt @@ -1,7 +1 @@ -[ - { - RuleName: SchemaMismatchedDataType, - Message: Data and type mismatch found., - Pointer: #/components/schemas/CreateCompletionRequest/properties/logprobs/default - } -] \ No newline at end of file +[] \ No newline at end of file diff --git a/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/_.verified.txt b/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/_.verified.txt index c11e83471e..f8c94fe154 100644 --- a/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/_.verified.txt +++ b/src/tests/AutoSDK.UnitTests/Snapshots/Processing/DetectedDiscriminators/openai.yaml/_.verified.txt @@ -13,72 +13,66 @@ info: servers: - url: https://api.openai.com/v1 paths: - /chat/completions: - post: + /assistants: + get: tags: - - Chat - summary: Creates a model response for the given chat conversation. - operationId: createChatCompletion - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateChatCompletionRequest' - required: true + - Assistants + summary: Returns a list of assistants. + operationId: listAssistants + parameters: + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + schema: + type: string responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/CreateChatCompletionResponse' + $ref: '#/components/schemas/ListAssistantsResponse' x-oaiMeta: - name: Create chat completion - group: chat - returns: "Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed.\n" - path: create + name: List assistants + group: assistants + beta: true + returns: 'A list of [assistant](/docs/api-reference/assistants/object) objects.' 
examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"system\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - - title: Image input - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"What'\\''s in this image?\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n }\n }\n ]\n }\n ],\n \"max_tokens\": 300\n }'\n" - python: "from openai import OpenAI\n\nclient = OpenAI()\n\nresponse = client.chat.completions.create(\n model=\"gpt-4o\",\n messages=[\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What's in this image?\"},\n {\n \"type\": \"image_url\",\n \"image_url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n ],\n }\n ],\n max_tokens=300,\n)\n\nprint(response.choices[0])\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: [\n {\n role: \"user\",\n content: [\n { type: \"text\", text: \"What's in this image?\" },\n {\n type: \"image_url\",\n image_url:\n \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n ],\n },\n ],\n });\n console.log(response.choices[0]);\n}\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a 
wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream=True\n)\n\nfor chunk in completion:\n print(chunk.choices[0].delta)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n model: \"VAR_model_id\",\n messages: [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream: true,\n });\n\n for await (const chunk of completion) {\n console.log(chunk.choices[0].delta.content);\n }\n}\n\nmain();" - response: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - - title: Functions - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"What'\\''s the weather like in Boston today?\"\n }\n ],\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"tool_choice\": \"auto\"\n}'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\nmessages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}]\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=messages,\n tools=tools,\n tool_choice=\"auto\"\n)\n\nprint(completion)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}];\n const tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n ];\n\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: messages,\n tools: tools,\n tool_choice: \"auto\",\n });\n\n console.log(response);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99\n }\n}\n" - - title: Logprobs - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"logprobs\": true,\n \"top_logprobs\": 2\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n logprobs=True,\n top_logprobs=2\n)\n\nprint(completion.choices[0].message)\nprint(completion.choices[0].logprobs)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"user\", content: \"Hello!\" }],\n model: \"VAR_model_id\",\n logprobs: true,\n top_logprobs: 2,\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1702685778,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\"\n },\n \"logprobs\": {\n \"content\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111],\n \"top_logprobs\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111]\n },\n {\n \"token\": \"Hi\",\n \"logprob\": -1.3190403,\n \"bytes\": [72, 105]\n }\n ]\n },\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [\n 33\n ],\n \"top_logprobs\": [\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [33]\n },\n {\n \"token\": \" there\",\n \"logprob\": -3.787621,\n \"bytes\": [32, 116, 104, 101, 114, 101]\n }\n ]\n },\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119],\n \"top_logprobs\": [\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119]\n },\n {\n \"token\": \"<|end|>\",\n \"logprob\": -10.953937,\n \"bytes\": null\n }\n ]\n },\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110],\n \"top_logprobs\": [\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110]\n },\n {\n \"token\": \" may\",\n \"logprob\": -4.161023,\n \"bytes\": [32, 109, 97, 121]\n }\n ]\n },\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [\n 32,\n 73\n ],\n \"top_logprobs\": [\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [32, 73]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -13.596657,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n }\n ]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116],\n \"top_logprobs\": [\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n },\n {\n \"token\": \" help\",\n \"logprob\": -3.1089056,\n \"bytes\": [32, 104, 101, 108, 112]\n }\n ]\n },\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117],\n \"top_logprobs\": [\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117]\n },\n {\n \"token\": \" today\",\n \"logprob\": -12.807695,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n }\n ]\n },\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121],\n \"top_logprobs\": [\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n },\n {\n \"token\": \"?\",\n \"logprob\": -5.5247097,\n \"bytes\": [63]\n }\n ]\n },\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63],\n \"top_logprobs\": [\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63]\n },\n {\n \"token\": \"?\\n\",\n \"logprob\": -7.184561,\n \"bytes\": [63, 10]\n }\n ]\n }\n ]\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18\n },\n \"system_fingerprint\": null\n}\n" - /completions: + request: + curl: "curl \"https://api.openai.com/v1/assistants?order=desc&limit=20\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistants = client.beta.assistants.list(\n order=\"desc\",\n limit=\"20\",\n)\nprint(my_assistants.data)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistants = await openai.beta.assistants.list({\n 
order: \"desc\",\n limit: \"20\",\n });\n\n console.log(myAssistants.data);\n}\n\nmain();" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" post: tags: - - Completions - summary: Creates a completion for the provided prompt and parameters. - operationId: createCompletion + - Assistants + summary: Create an assistant with a model and instructions. + operationId: createAssistant requestBody: content: application/json: schema: - $ref: '#/components/schemas/CreateCompletionRequest' + $ref: '#/components/schemas/CreateAssistantRequest' required: true responses: '200': @@ -86,94 +80,73 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CreateCompletionResponse' + $ref: '#/components/schemas/AssistantObject' x-oaiMeta: - name: Create completion - group: completions - returns: "Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed.\n" - legacy: true + name: Create assistant + group: assistants + beta: true + returns: 'An [assistant](/docs/api-reference/assistants/object) object.' 
examples: - - title: No streaming + - title: Code Interpreter request: - curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n max_tokens: 7,\n temperature: 0,\n });\n\n console.log(completion);\n}\nmain();" - response: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"VAR_model_id\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" - - title: Streaming + curl: "curl \"https://api.openai.com/v1/assistants\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"name\": \"Math Tutor\",\n \"tools\": [{\"type\": \"code_interpreter\"}],\n \"model\": \"gpt-4o\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n name=\"Math Tutor\",\n tools=[{\"type\": \"code_interpreter\"}],\n model=\"gpt-4o\",\n)\nprint(my_assistant)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n name: \"Math Tutor\",\n tools: [{ type: \"code_interpreter\" }],\n model: \"gpt-4o\",\n });\n\n console.log(myAssistant);\n}\n\nmain();" + response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + - title: Files request: - curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0,\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nfor chunk in client.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0,\n stream=True\n):\n print(chunk.choices[0].text)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n stream: true,\n });\n\n for await (const chunk of stream) {\n console.log(chunk.choices[0].text)\n }\n}\nmain();" - response: "{\n \"id\": \"cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe\",\n \"object\": \"text_completion\",\n \"created\": 1690759702,\n \"choices\": [\n {\n \"text\": \"This\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": null\n }\n ],\n \"model\": \"gpt-3.5-turbo-instruct\"\n \"system_fingerprint\": \"fp_44709d6fcb\",\n}\n" - /images/generations: - post: - tags: - - Images - summary: Creates an image given a prompt. - operationId: createImage - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateImageRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - x-oaiMeta: - name: Create image - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/images/generations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"dall-e-3\",\n \"prompt\": \"A cute baby sea otter\",\n \"n\": 1,\n \"size\": \"1024x1024\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.generate(\n model=\"dall-e-3\",\n prompt=\"A cute baby sea otter\",\n n=1,\n size=\"1024x1024\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.generate({ model: \"dall-e-3\", prompt: \"A cute baby sea otter\" });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /images/edits: - post: + curl: "curl https://api.openai.com/v1/assistants \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"tool_resources\": {\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n \"model\": \"gpt-4o\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n tool_resources={\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n model=\"gpt-4o\"\n)\nprint(my_assistant)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n tool_resources: {\n file_search: {\n vector_store_ids: [\"vs_123\"]\n }\n },\n model: \"gpt-4o\"\n });\n\n console.log(myAssistant);\n}\n\nmain();" + response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009403,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + '/assistants/{assistant_id}': + get: tags: - - Images - summary: Creates an edited or extended image given an original image and a prompt. - operationId: createImageEdit - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateImageEditRequest' - required: true + - Assistants + summary: Retrieves an assistant. + operationId: getAssistant + parameters: + - name: assistant_id + in: path + description: The ID of the assistant to retrieve. 
+ required: true + schema: + type: string responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/ImagesResponse' + $ref: '#/components/schemas/AssistantObject' x-oaiMeta: - name: Create image edit - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' + name: Retrieve assistant + group: assistants + beta: true + returns: 'The [assistant](/docs/api-reference/assistants/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/images/edits \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F mask=\"@mask.png\" \\\n -F prompt=\"A cute baby sea otter wearing a beret\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.edit(\n image=open(\"otter.png\", \"rb\"),\n mask=open(\"mask.png\", \"rb\"),\n prompt=\"A cute baby sea otter wearing a beret\",\n n=2,\n size=\"1024x1024\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.edit({\n image: fs.createReadStream(\"otter.png\"),\n mask: fs.createReadStream(\"mask.png\"),\n prompt: \"A cute baby sea otter wearing a beret\",\n });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /images/variations: + curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.retrieve(\"asst_abc123\")\nprint(my_assistant)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.retrieve(\n \"asst_abc123\"\n );\n\n console.log(myAssistant);\n}\n\nmain();" + response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" post: tags: - - Images - summary: Creates a variation of a given image. - operationId: createImageVariation + - Assistants + summary: Modifies an assistant. + operationId: modifyAssistant + parameters: + - name: assistant_id + in: path + description: The ID of the assistant to modify. + required: true + schema: + type: string requestBody: content: - multipart/form-data: + application/json: schema: - $ref: '#/components/schemas/CreateImageVariationRequest' + $ref: '#/components/schemas/ModifyAssistantRequest' required: true responses: '200': @@ -181,46 +154,48 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ImagesResponse' + $ref: '#/components/schemas/AssistantObject' x-oaiMeta: - name: Create image variation - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
+ name: Modify assistant + group: assistants + beta: true + returns: 'The modified [assistant](/docs/api-reference/assistants/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/images/variations \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.images.create_variation(\n image=open(\"image_edit_original.png\", \"rb\"),\n n=2,\n size=\"1024x1024\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.createVariation({\n image: fs.createReadStream(\"otter.png\"),\n });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /embeddings: - post: + curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"model\": \"gpt-4o\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_assistant = client.beta.assistants.update(\n \"asst_abc123\",\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n model=\"gpt-4o\"\n)\n\nprint(my_updated_assistant)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myUpdatedAssistant = await openai.beta.assistants.update(\n \"asst_abc123\",\n {\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n model: \"gpt-4o\"\n }\n );\n\n console.log(myUpdatedAssistant);\n}\n\nmain();" + response: "{\n \"id\": \"asst_123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": []\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + delete: tags: - - Embeddings - summary: Creates an embedding vector representing the input text. - operationId: createEmbedding - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEmbeddingRequest' - required: true + - Assistants + summary: Delete an assistant. + operationId: deleteAssistant + parameters: + - name: assistant_id + in: path + description: The ID of the assistant to delete. 
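Taken together, the assistant operations above form a simple lifecycle: create, retrieve, update, delete. The following is a minimal sketch of that flow with the official Python client, using only the calls shown in the surrounding examples; the model, name, and instruction strings are illustrative placeholders.

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Create an assistant (placeholder name and instructions).
assistant = client.beta.assistants.create(
    model="gpt-4o",
    name="HR Helper",
    instructions="You answer employee questions about company policies.",
    tools=[{"type": "file_search"}],
)

# Fetch it back by ID.
fetched = client.beta.assistants.retrieve(assistant.id)
print(fetched.name)

# Update the instructions in place.
client.beta.assistants.update(
    assistant.id,
    instructions="Always respond with information drawn from the attached files.",
)

# Delete it once it is no longer needed.
status = client.beta.assistants.delete(assistant.id)
print(status.deleted)  # True on success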
+ required: true + schema: + type: string responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/CreateEmbeddingResponse' + $ref: '#/components/schemas/DeleteAssistantResponse' x-oaiMeta: - name: Create embeddings - group: embeddings - returns: 'A list of [embedding](/docs/api-reference/embeddings/object) objects.' + name: Delete assistant + group: assistants + beta: true + returns: Deletion status examples: request: - curl: "curl https://api.openai.com/v1/embeddings \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input\": \"The food was delicious and the waiter...\",\n \"model\": \"text-embedding-ada-002\",\n \"encoding_format\": \"float\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.embeddings.create(\n model=\"text-embedding-ada-002\",\n input=\"The food was delicious and the waiter...\",\n encoding_format=\"float\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const embedding = await openai.embeddings.create({\n model: \"text-embedding-ada-002\",\n input: \"The quick brown fox jumped over the lazy dog\",\n encoding_format: \"float\",\n });\n\n console.log(embedding);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": 8,\n \"total_tokens\": 8\n }\n}\n" + curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.assistants.delete(\"asst_abc123\")\nprint(response)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.assistants.del(\"asst_abc123\");\n\n console.log(response);\n}\nmain();" + response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant.deleted\",\n \"deleted\": true\n}\n" /audio/speech: post: tags: @@ -330,162 +305,166 @@ paths: python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.translations.create(\n model=\"whisper-1\",\n file=audio_file\n)\n" node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const translation = await openai.audio.translations.create({\n file: fs.createReadStream(\"speech.mp3\"),\n model: \"whisper-1\",\n });\n\n console.log(translation.text);\n}\nmain();\n" response: "{\n \"text\": \"Hello, my name is Wolfgang and I come from Germany. Where are you heading today?\"\n}\n" - /files: - get: + /batches: + post: tags: - - Files - summary: Returns a list of files that belong to the user's organization. - operationId: listFiles - parameters: - - name: purpose - in: query - description: Only return files with the given purpose. - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFilesResponse' - x-oaiMeta: - name: List files - group: files - returns: 'A list of [File](/docs/api-reference/files/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.list()\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.files.list();\n\n for await (const file of list) {\n console.log(file);\n }\n}\n\nmain();" - response: "{\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 175,\n \"created_at\": 1613677385,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n },\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779121,\n \"filename\": \"puppy.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n ],\n \"object\": \"list\"\n}\n" - post: - tags: - - Files - summary: "Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.\n\nThe Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.\n\nThe Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.\n\nThe Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).\n\nPlease [contact us](https://help.openai.com/) if you need to increase these storage limits.\n" - operationId: createFile + - Batch + summary: Creates and executes a batch from an uploaded file of requests + operationId: createBatch requestBody: content: - multipart/form-data: + application/json: schema: - $ref: '#/components/schemas/CreateFileRequest' + required: + - input_file_id + - endpoint + - completion_window + type: object + properties: + input_file_id: + type: string + description: "The ID of an uploaded file that contains requests for the new batch.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size.\n" + endpoint: + enum: + - /v1/chat/completions + - /v1/embeddings + - /v1/completions + type: string + description: 'The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch.' + completion_window: + enum: + - 24h + type: string + description: The time frame within which the batch should be processed. Currently only `24h` is supported. + metadata: + type: object + additionalProperties: + type: string + description: Optional custom metadata for the batch. + nullable: true required: true responses: '200': - description: OK + description: Batch created successfully. content: application/json: schema: - $ref: '#/components/schemas/OpenAIFile' + $ref: '#/components/schemas/Batch' x-oaiMeta: - name: Upload file - group: files - returns: 'The uploaded [File](/docs/api-reference/files/object) object.' 
+ name: Create batch + group: batch + returns: 'The created [Batch](/docs/api-reference/batch/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F purpose=\"fine-tune\" \\\n -F file=\"@mydata.jsonl\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.create(\n file=open(\"mydata.jsonl\", \"rb\"),\n purpose=\"fine-tune\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.create({\n file: fs.createReadStream(\"mydata.jsonl\"),\n purpose: \"fine-tune\",\n });\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" - '/files/{file_id}': - delete: + curl: "curl https://api.openai.com/v1/batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input_file_id\": \"file-abc123\",\n \"endpoint\": \"/v1/chat/completions\",\n \"completion_window\": \"24h\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.create(\n input_file_id=\"file-abc123\",\n endpoint=\"/v1/chat/completions\",\n completion_window=\"24h\"\n)\n" + node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.create({\n input_file_id: \"file-abc123\",\n endpoint: \"/v1/chat/completions\",\n completion_window: \"24h\"\n });\n\n console.log(batch);\n}\n\nmain();\n" + response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"validating\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": null,\n \"expires_at\": null,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 0,\n \"completed\": 0,\n \"failed\": 0\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + get: tags: - - Files - summary: Delete a file. - operationId: deleteFile + - Batch + summary: List your organization's batches. + operationId: listBatches parameters: - - name: file_id - in: path - description: The ID of the file to use for this request. - required: true + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 responses: '200': - description: OK + description: Batch listed successfully. content: application/json: schema: - $ref: '#/components/schemas/DeleteFileResponse' + $ref: '#/components/schemas/ListBatchesResponse' x-oaiMeta: - name: Delete file - group: files - returns: Deletion status. 
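As a sketch of how the batch pieces above fit together: each line of the input file is a single JSON request object, the file is uploaded with purpose `batch` as required by `input_file_id`, and the returned file ID is passed to the create call. The per-line field names (`custom_id`, `method`, `url`, `body`) follow the batch request-input format linked above and, like the file name and prompts, are assumptions here rather than something restated in this spec.

import json
from openai import OpenAI

client = OpenAI()

# Two requests in the one-JSON-object-per-line layout referenced above.
batch_requests = [
    {
        "custom_id": f"request-{i}",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": f"Say hello #{i}"}],
        },
    }
    for i in range(2)
]
with open("batch_input.jsonl", "w") as f:
    for request in batch_requests:
        f.write(json.dumps(request) + "\n")

# Upload with purpose "batch", then reference the file when creating the batch.
batch_file = client.files.create(file=open("batch_input.jsonl", "rb"), purpose="batch")
batch = client.batches.create(
    input_file_id=batch_file.id,
    endpoint="/v1/chat/completions",
    completion_window="24h",
    metadata={"batch_description": "Nightly eval job"},
)
print(batch.id, batch.status)  # e.g. "batch_abc123 validating"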
+ name: List batch + group: batch + returns: 'A list of paginated [Batch](/docs/api-reference/batch/object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -X DELETE \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.delete(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.del(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"deleted\": true\n}\n" + curl: "curl https://api.openai.com/v1/batches?limit=2 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.list()\n" + node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.batches.list();\n\n for await (const batch of list) {\n console.log(batch);\n }\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly job\",\n }\n },\n { ... },\n ],\n \"first_id\": \"batch_abc123\",\n \"last_id\": \"batch_abc456\",\n \"has_more\": true\n}\n" + '/batches/{batch_id}': get: tags: - - Files - summary: Returns information about a specific file. - operationId: retrieveFile + - Batch + summary: Retrieves a batch. + operationId: retrieveBatch parameters: - - name: file_id + - name: batch_id in: path - description: The ID of the file to use for this request. + description: The ID of the batch to retrieve. required: true schema: type: string responses: '200': - description: OK + description: Batch retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/OpenAIFile' + $ref: '#/components/schemas/Batch' x-oaiMeta: - name: Retrieve file - group: files - returns: 'The [File](/docs/api-reference/files/object) object matching the specified ID.' + name: Retrieve batch + group: batch + returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' 
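The `after` and `limit` parameters described above implement cursor pagination. Below is a bare-bones illustration against the REST endpoint that walks pages using the `has_more` and `last_id` fields from the list response; it assumes `OPENAI_API_KEY` is set in the environment (the official clients can also auto-paginate, as the list examples show).

import os
import requests

headers = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}
params = {"limit": 20}
batches = []

while True:
    resp = requests.get(
        "https://api.openai.com/v1/batches", headers=headers, params=params
    )
    resp.raise_for_status()
    page = resp.json()
    batches.extend(page["data"])
    if not page["has_more"]:
        break
    # Feed the last object ID back as the `after` cursor for the next page.
    params["after"] = page["last_id"]

print(f"fetched {len(batches)} batches")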
examples: request: - curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.retrieve(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.retrieve(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" - '/files/{file_id}/content': - get: + curl: "curl https://api.openai.com/v1/batches/batch_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.retrieve(\"batch_abc123\")\n" + node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.retrieve(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" + response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + '/batches/{batch_id}/cancel': + post: tags: - - Files - summary: Returns the contents of the specified file. - operationId: downloadFile + - Batch + summary: 'Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file.' + operationId: cancelBatch parameters: - - name: file_id + - name: batch_id in: path - description: The ID of the file to use for this request. + description: The ID of the batch to cancel. required: true schema: type: string responses: '200': - description: OK + description: Batch is cancelling. Returns the cancelling batch's details. content: application/json: schema: - type: string + $ref: '#/components/schemas/Batch' x-oaiMeta: - name: Retrieve file content - group: files - returns: The file content. + name: Cancel batch + group: batch + returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' 
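Retrieval is most often used to poll a batch until it settles and then read its output file. The sketch below assumes the terminal status values implied by this spec (`completed`, `failed`, `expired`, `cancelled`), a placeholder batch ID, and that the file-content response in the Python client exposes the body as `.text`.

import time
from openai import OpenAI

client = OpenAI()
batch_id = "batch_abc123"  # placeholder

terminal_statuses = {"completed", "failed", "expired", "cancelled"}
while True:
    batch = client.batches.retrieve(batch_id)
    if batch.status in terminal_statuses:
        break
    time.sleep(60)  # batches run within the 24h completion window, so poll sparingly

if batch.status == "completed" and batch.output_file_id:
    # Each line of the output file is one JSON result, mirroring the input JSONL.
    output = client.files.content(batch.output_file_id)
    print(output.text)
elif batch.error_file_id:
    print("errors:", client.files.content(batch.error_file_id).text)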
examples: request: - curl: "curl https://api.openai.com/v1/files/file-abc123/content \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" > file.jsonl\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncontent = client.files.content(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.content(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();\n" - /uploads: + curl: "curl https://api.openai.com/v1/batches/batch_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -X POST\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.cancel(\"batch_abc123\")\n" + node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.cancel(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" + response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"cancelling\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": 1711475133,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 23,\n \"failed\": 1\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + /chat/completions: post: tags: - - Uploads - summary: "Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.\n\nOnce you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.\n\nFor certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:\n- [Assistants](/docs/assistants/tools/file-search/supported-files)\n\nFor guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).\n" - operationId: createUpload + - Chat + summary: "Creates a model response for the given chat conversation. Learn more in the\n[text generation](/docs/guides/text-generation), [vision](/docs/guides/vision),\nand [audio](/docs/guides/audio) guides.\n" + operationId: createChatCompletion requestBody: content: application/json: schema: - $ref: '#/components/schemas/CreateUploadRequest' + $ref: '#/components/schemas/CreateChatCompletionRequest' required: true responses: '200': @@ -493,34 +472,54 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Upload' + $ref: '#/components/schemas/CreateChatCompletionResponse' x-oaiMeta: - name: Create upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `pending`.' 
+ name: Create chat completion + group: chat + returns: "Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed.\n" + path: create examples: - request: - curl: "curl https://api.openai.com/v1/uploads \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"purpose\": \"fine-tune\",\n \"filename\": \"training_examples.jsonl\",\n \"bytes\": 2147483648,\n \"mime_type\": \"text/jsonl\"\n }'\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"pending\",\n \"expires_at\": 1719127296\n}\n" - '/uploads/{upload_id}/parts': + - title: Default + request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_chat_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_chat_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"system\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_chat_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" + - title: Image input + request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"What'\\''s in this image?\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n }\n }\n ]\n }\n ],\n \"max_tokens\": 300\n }'\n" + python: "from openai import OpenAI\n\nclient = OpenAI()\n\nresponse = client.chat.completions.create(\n model=\"gpt-4o\",\n messages=[\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What's in this image?\"},\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": 
\"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n }\n },\n ],\n }\n ],\n max_tokens=300,\n)\n\nprint(response.choices[0])\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: [\n {\n role: \"user\",\n content: [\n { type: \"text\", text: \"What's in this image?\" },\n {\n type: \"image_url\",\n image_url: {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n }\n ],\n },\n ],\n });\n console.log(response.choices[0]);\n}\nmain();" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_chat_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_chat_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream=True\n)\n\nfor chunk in completion:\n print(chunk.choices[0].delta)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n model: \"VAR_chat_model_id\",\n messages: [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream: true,\n });\n\n for await (const chunk of completion) {\n console.log(chunk.choices[0].delta.content);\n }\n}\n\nmain();" + response: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" + - title: Functions + 
request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"What'\\''s the weather like in Boston today?\"\n }\n ],\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"tool_choice\": \"auto\"\n}'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\nmessages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}]\ncompletion = client.chat.completions.create(\n model=\"VAR_chat_model_id\",\n messages=messages,\n tools=tools,\n tool_choice=\"auto\"\n)\n\nprint(completion)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}];\n const tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n ];\n\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: messages,\n tools: tools,\n tool_choice: \"auto\",\n });\n\n console.log(response);\n}\n\nmain();" + response: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" + - title: Logprobs + request: + curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_chat_model_id\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"logprobs\": true,\n \"top_logprobs\": 2\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_chat_model_id\",\n messages=[\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n logprobs=True,\n top_logprobs=2\n)\n\nprint(completion.choices[0].message)\nprint(completion.choices[0].logprobs)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"user\", content: \"Hello!\" }],\n model: \"VAR_chat_model_id\",\n logprobs: true,\n top_logprobs: 2,\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" + response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1702685778,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\"\n },\n \"logprobs\": {\n \"content\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111],\n \"top_logprobs\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111]\n },\n {\n \"token\": \"Hi\",\n \"logprob\": -1.3190403,\n \"bytes\": [72, 105]\n }\n ]\n },\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [\n 33\n ],\n \"top_logprobs\": [\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [33]\n },\n {\n \"token\": \" there\",\n \"logprob\": -3.787621,\n \"bytes\": [32, 116, 104, 101, 114, 101]\n }\n ]\n },\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119],\n \"top_logprobs\": [\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119]\n },\n {\n \"token\": \"<|end|>\",\n \"logprob\": -10.953937,\n \"bytes\": null\n }\n ]\n },\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110],\n \"top_logprobs\": [\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110]\n },\n {\n \"token\": \" may\",\n \"logprob\": -4.161023,\n \"bytes\": [32, 109, 97, 121]\n }\n ]\n },\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [\n 32,\n 73\n ],\n \"top_logprobs\": [\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [32, 73]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -13.596657,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n }\n ]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116],\n \"top_logprobs\": [\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n },\n {\n \"token\": \" help\",\n \"logprob\": -3.1089056,\n \"bytes\": [32, 104, 101, 108, 112]\n }\n ]\n },\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117],\n \"top_logprobs\": [\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117]\n },\n {\n \"token\": \" today\",\n \"logprob\": -12.807695,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n }\n ]\n },\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121],\n \"top_logprobs\": [\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n },\n {\n \"token\": \"?\",\n \"logprob\": -5.5247097,\n \"bytes\": [63]\n }\n ]\n },\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63],\n \"top_logprobs\": [\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63]\n },\n {\n \"token\": \"?\\n\",\n \"logprob\": -7.184561,\n \"bytes\": [63, 10]\n }\n ]\n }\n ]\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\": null\n}\n" + /completions: post: tags: - - Uploads - summary: "Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. \n\nEach Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB.\n\nIt is possible to add multiple Parts in parallel. 
You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete).\n" - operationId: addUploadPart - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 + - Completions + summary: Creates a completion for the provided prompt and parameters. + operationId: createCompletion requestBody: content: - multipart/form-data: + application/json: schema: - $ref: '#/components/schemas/AddUploadPartRequest' + $ref: '#/components/schemas/CreateCompletionRequest' required: true responses: '200': @@ -528,34 +527,36 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/UploadPart' + $ref: '#/components/schemas/CreateCompletionResponse' x-oaiMeta: - name: Add upload part - group: uploads - returns: 'The upload [Part](/docs/api-reference/uploads/part-object) object.' + name: Create completion + group: completions + returns: "Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed.\n" + legacy: true examples: - request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/parts\n -F data=\"aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz...\"\n" - response: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719185911,\n \"upload_id\": \"upload_abc123\"\n}\n" - '/uploads/{upload_id}/complete': + - title: No streaming + request: + curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_completion_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.completions.create(\n model=\"VAR_completion_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.completions.create({\n model: \"VAR_completion_model_id\",\n prompt: \"Say this is a test.\",\n max_tokens: 7,\n temperature: 0,\n });\n\n console.log(completion);\n}\nmain();" + response: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"VAR_completion_model_id\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_completion_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0,\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nfor chunk in client.completions.create(\n model=\"VAR_completion_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0,\n stream=True\n):\n print(chunk.choices[0].text)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.completions.create({\n model: \"VAR_completion_model_id\",\n prompt: \"Say 
this is a test.\",\n stream: true,\n });\n\n for await (const chunk of stream) {\n console.log(chunk.choices[0].text)\n }\n}\nmain();" + response: "{\n \"id\": \"cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe\",\n \"object\": \"text_completion\",\n \"created\": 1690759702,\n \"choices\": [\n {\n \"text\": \"This\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": null\n }\n ],\n \"model\": \"gpt-3.5-turbo-instruct\"\n \"system_fingerprint\": \"fp_44709d6fcb\",\n}\n" + /embeddings: post: tags: - - Uploads - summary: "Completes the [Upload](/docs/api-reference/uploads/object). \n\nWithin the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform.\n\nYou can specify the order of the Parts by passing in an ordered list of the Part IDs.\n\nThe number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed.\n" - operationId: completeUpload - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 - requestBody: + - Embeddings + summary: Creates an embedding vector representing the input text. + operationId: createEmbedding + requestBody: content: application/json: schema: - $ref: '#/components/schemas/CompleteUploadRequest' + $ref: '#/components/schemas/CreateEmbeddingRequest' required: true responses: '200': @@ -563,44 +564,182 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Upload' + $ref: '#/components/schemas/CreateEmbeddingResponse' x-oaiMeta: - name: Complete upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object.' + name: Create embeddings + group: embeddings + returns: 'A list of [embedding](/docs/api-reference/embeddings/object) objects.' 
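Embedding vectors are normally consumed by comparing them. The short sketch below requests two embeddings (same model as the examples that follow) and scores them with cosine similarity; the similarity helper is illustrative and not part of the API.

import math
from openai import OpenAI

client = OpenAI()

def cosine_similarity(a, b):
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    return dot / (norm_a * norm_b)

resp = client.embeddings.create(
    model="text-embedding-ada-002",
    input=["The food was delicious", "The meal tasted great"],
    encoding_format="float",
)
first, second = [item.embedding for item in resp.data]
print(cosine_similarity(first, second))  # closer to 1.0 means more similar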
examples: request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/complete\n -d '{\n \"part_ids\": [\"part_def456\", \"part_ghi789\"]\n }'\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" - '/uploads/{upload_id}/cancel': + curl: "curl https://api.openai.com/v1/embeddings \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input\": \"The food was delicious and the waiter...\",\n \"model\": \"text-embedding-ada-002\",\n \"encoding_format\": \"float\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.embeddings.create(\n model=\"text-embedding-ada-002\",\n input=\"The food was delicious and the waiter...\",\n encoding_format=\"float\"\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const embedding = await openai.embeddings.create({\n model: \"text-embedding-ada-002\",\n input: \"The quick brown fox jumped over the lazy dog\",\n encoding_format: \"float\",\n });\n\n console.log(embedding);\n}\n\nmain();" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": 8,\n \"total_tokens\": 8\n }\n}\n" + /files: + get: + tags: + - Files + summary: Returns a list of files. + operationId: listFiles + parameters: + - name: purpose + in: query + description: Only return files with the given purpose. + schema: + type: string + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 10,000, and the default is 10,000.\n" + schema: + type: integer + default: 10000 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListFilesResponse' + x-oaiMeta: + name: List files + group: files + returns: 'A list of [File](/docs/api-reference/files/object) objects.' 
+ examples: + request: + curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.list()\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.files.list();\n\n for await (const file of list) {\n console.log(file);\n }\n}\n\nmain();" + response: "{\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 175,\n \"created_at\": 1613677385,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n },\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779121,\n \"filename\": \"puppy.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n ],\n \"object\": \"list\"\n}\n" post: tags: - - Uploads - summary: "Cancels the Upload. No Parts may be added after an Upload is cancelled.\n" - operationId: cancelUpload + - Files + summary: "Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.\n\nThe Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.\n\nThe Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.\n\nThe Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input).\n\nPlease [contact us](https://help.openai.com/) if you need to increase these storage limits.\n" + operationId: createFile + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateFileRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + x-oaiMeta: + name: Upload file + group: files + returns: 'The uploaded [File](/docs/api-reference/files/object) object.' + examples: + request: + curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F purpose=\"fine-tune\" \\\n -F file=\"@mydata.jsonl\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.create(\n file=open(\"mydata.jsonl\", \"rb\"),\n purpose=\"fine-tune\"\n)\n" + node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.create({\n file: fs.createReadStream(\"mydata.jsonl\"),\n purpose: \"fine-tune\",\n });\n\n console.log(file);\n}\n\nmain();" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" + '/files/{file_id}': + delete: + tags: + - Files + summary: Delete a file. + operationId: deleteFile parameters: - - name: upload_id + - name: file_id in: path - description: "The ID of the Upload.\n" + description: The ID of the file to use for this request. 
required: true schema: type: string - example: upload_abc123 responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/Upload' + $ref: '#/components/schemas/DeleteFileResponse' x-oaiMeta: - name: Cancel upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`.' + name: Delete file + group: files + returns: Deletion status. examples: request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/cancel\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"cancelled\",\n \"expires_at\": 1719127296\n}\n" + curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -X DELETE \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.delete(\"file-abc123\")\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.del(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"deleted\": true\n}\n" + get: + tags: + - Files + summary: Returns information about a specific file. + operationId: retrieveFile + parameters: + - name: file_id + in: path + description: The ID of the file to use for this request. + required: true + schema: + type: string + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFile' + x-oaiMeta: + name: Retrieve file + group: files + returns: 'The [File](/docs/api-reference/files/object) object matching the specified ID.' + examples: + request: + curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.retrieve(\"file-abc123\")\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.retrieve(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" + '/files/{file_id}/content': + get: + tags: + - Files + summary: Returns the contents of the specified file. + operationId: downloadFile + parameters: + - name: file_id + in: path + description: The ID of the file to use for this request. + required: true + schema: + type: string + responses: + '200': + description: OK + content: + application/json: + schema: + type: string + x-oaiMeta: + name: Retrieve file content + group: files + returns: The file content. 
+ examples: + request: + curl: "curl https://api.openai.com/v1/files/file-abc123/content \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" > file.jsonl\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ncontent = client.files.content(\"file-abc123\")\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.content(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();\n" /fine_tuning/jobs: post: tags: @@ -712,48 +851,6 @@ paths: python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.retrieve(\"ftjob-abc123\")\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.retrieve(\"ftjob-abc123\");\n\n console.log(fineTune);\n}\n\nmain();\n" response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/events': - get: - tags: - - Fine-tuning - summary: "Get status updates for a fine-tuning job.\n" - operationId: listFineTuningEvents - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job to get events for.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - - name: after - in: query - description: Identifier for the last event from the previous pagination request. - schema: - type: string - - name: limit - in: query - description: Number of events to retrieve. - schema: - type: integer - default: 20 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuningJobEventsResponse' - x-oaiMeta: - name: List fine-tuning events - group: fine-tuning - returns: A list of fine-tuning event objects. 
- examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.list_events(\n fine_tuning_job_id=\"ftjob-abc123\",\n limit=2\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.fineTuning.list_events(id=\"ftjob-abc123\", limit=2);\n\n for await (const fineTune of list) {\n console.log(fineTune);\n }\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-ddTJfwuMVpfLXseO0Am0Gqjm\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"Fine tuning job successfully completed\",\n \"data\": null,\n \"type\": \"message\"\n },\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-tyiGuB72evQncpH87xe505Sv\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel\",\n \"data\": null,\n \"type\": \"message\"\n }\n ],\n \"has_more\": true\n}\n" '/fine_tuning/jobs/{fine_tuning_job_id}/cancel': post: tags: @@ -825,26 +922,155 @@ paths: request: curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/checkpoints \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" response: "{\n \"object\": \"list\"\n \"data\": [\n {\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"created_at\": 1721764867,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000\",\n \"metrics\": {\n \"full_valid_loss\": 0.134,\n \"full_valid_mean_token_accuracy\": 0.874\n },\n \"fine_tuning_job_id\": \"ftjob-abc123\",\n \"step_number\": 2000,\n },\n {\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_enQCFmOTGj3syEpYVhBRLTSy\",\n \"created_at\": 1721764800,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000\",\n \"metrics\": {\n \"full_valid_loss\": 0.167,\n \"full_valid_mean_token_accuracy\": 0.781\n },\n \"fine_tuning_job_id\": \"ftjob-abc123\",\n \"step_number\": 1000,\n },\n ],\n \"first_id\": \"ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"last_id\": \"ftckpt_enQCFmOTGj3syEpYVhBRLTSy\",\n \"has_more\": true\n}\n" - /models: + '/fine_tuning/jobs/{fine_tuning_job_id}/events': get: tags: - - Models - summary: 'Lists the currently available models, and provides basic information about each one such as the owner and availability.' - operationId: listModels + - Fine-tuning + summary: "Get status updates for a fine-tuning job.\n" + operationId: listFineTuningEvents + parameters: + - name: fine_tuning_job_id + in: path + description: "The ID of the fine-tuning job to get events for.\n" + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + - name: after + in: query + description: Identifier for the last event from the previous pagination request. + schema: + type: string + - name: limit + in: query + description: Number of events to retrieve. + schema: + type: integer + default: 20 responses: '200': description: OK content: application/json: schema: - $ref: '#/components/schemas/ListModelsResponse' + $ref: '#/components/schemas/ListFineTuningJobEventsResponse' x-oaiMeta: - name: List models - group: models - returns: 'A list of [model](/docs/api-reference/models/object) objects.' 
+ name: List fine-tuning events + group: fine-tuning + returns: A list of fine-tuning event objects. examples: request: - curl: "curl https://api.openai.com/v1/models \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.list_events(\n fine_tuning_job_id=\"ftjob-abc123\",\n limit=2\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.fineTuning.list_events(id=\"ftjob-abc123\", limit=2);\n\n for await (const fineTune of list) {\n console.log(fineTune);\n }\n}\n\nmain();" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-ddTJfwuMVpfLXseO0Am0Gqjm\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"Fine tuning job successfully completed\",\n \"data\": null,\n \"type\": \"message\"\n },\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-tyiGuB72evQncpH87xe505Sv\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel\",\n \"data\": null,\n \"type\": \"message\"\n }\n ],\n \"has_more\": true\n}\n" + /images/edits: + post: + tags: + - Images + summary: Creates an edited or extended image given an original image and a prompt. + operationId: createImageEdit + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateImageEditRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image edit + group: images + returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/images/edits \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F mask=\"@mask.png\" \\\n -F prompt=\"A cute baby sea otter wearing a beret\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.edit(\n image=open(\"otter.png\", \"rb\"),\n mask=open(\"mask.png\", \"rb\"),\n prompt=\"A cute baby sea otter wearing a beret\",\n n=2,\n size=\"1024x1024\"\n)\n" + node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.edit({\n image: fs.createReadStream(\"otter.png\"),\n mask: fs.createReadStream(\"mask.png\"),\n prompt: \"A cute baby sea otter wearing a beret\",\n });\n\n console.log(image.data);\n}\nmain();" + response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" + /images/generations: + post: + tags: + - Images + summary: Creates an image given a prompt. + operationId: createImage + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateImageRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image + group: images + returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
+ examples: + request: + curl: "curl https://api.openai.com/v1/images/generations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"dall-e-3\",\n \"prompt\": \"A cute baby sea otter\",\n \"n\": 1,\n \"size\": \"1024x1024\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.generate(\n model=\"dall-e-3\",\n prompt=\"A cute baby sea otter\",\n n=1,\n size=\"1024x1024\"\n)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.generate({ model: \"dall-e-3\", prompt: \"A cute baby sea otter\" });\n\n console.log(image.data);\n}\nmain();" + response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" + /images/variations: + post: + tags: + - Images + summary: Creates a variation of a given image. + operationId: createImageVariation + requestBody: + content: + multipart/form-data: + schema: + $ref: '#/components/schemas/CreateImageVariationRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ImagesResponse' + x-oaiMeta: + name: Create image variation + group: images + returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/images/variations \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.images.create_variation(\n image=open(\"image_edit_original.png\", \"rb\"),\n n=2,\n size=\"1024x1024\"\n)\n" + node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.createVariation({\n image: fs.createReadStream(\"otter.png\"),\n });\n\n console.log(image.data);\n}\nmain();" + response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" + /models: + get: + tags: + - Models + summary: 'Lists the currently available models, and provides basic information about each one such as the owner and availability.' + operationId: listModels + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListModelsResponse' + x-oaiMeta: + name: List models + group: models + returns: 'A list of [model](/docs/api-reference/models/object) objects.' 
+ examples: + request: + curl: "curl https://api.openai.com/v1/models \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.list()\n" node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.models.list();\n\n for await (const model of list) {\n console.log(model);\n }\n}\nmain();" response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"model-id-0\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"organization-owner\"\n },\n {\n \"id\": \"model-id-1\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"organization-owner\",\n },\n {\n \"id\": \"model-id-2\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n },\n ],\n \"object\": \"list\"\n}\n" @@ -875,10 +1101,10 @@ paths: returns: 'The [model](/docs/api-reference/models/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/models/VAR_model_id \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.retrieve(\"VAR_model_id\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const model = await openai.models.retrieve(\"VAR_model_id\");\n\n console.log(model);\n}\n\nmain();" - response: "{\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" + curl: "curl https://api.openai.com/v1/models/VAR_chat_model_id \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.retrieve(\"VAR_chat_model_id\")\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const model = await openai.models.retrieve(\"VAR_chat_model_id\");\n\n console.log(model);\n}\n\nmain();" + response: "{\n \"id\": \"VAR_chat_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" delete: tags: - Models @@ -913,7 +1139,7 @@ paths: post: tags: - Moderations - summary: Classifies if text is potentially harmful. + summary: "Classifies if text and/or image inputs are potentially harmful. Learn\nmore in the [moderation guide](/docs/guides/moderation).\n" operationId: createModeration requestBody: content: @@ -933,33 +1159,84 @@ paths: group: moderations returns: 'A [moderation](/docs/api-reference/moderations/object) object.' 
examples: - request: - curl: "curl https://api.openai.com/v1/moderations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"input\": \"I want to kill them.\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmoderation = client.moderations.create(input=\"I want to kill them.\")\nprint(moderation)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const moderation = await openai.moderations.create({ input: \"I want to kill them.\" });\n\n console.log(moderation);\n}\nmain();\n" - response: "{\n \"id\": \"modr-XXXXX\",\n \"model\": \"text-moderation-005\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"sexual\": false,\n \"hate\": false,\n \"harassment\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"hate/threatening\": false,\n \"violence/graphic\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"harassment/threatening\": true,\n \"violence\": true,\n },\n \"category_scores\": {\n \"sexual\": 1.2282071e-06,\n \"hate\": 0.010696256,\n \"harassment\": 0.29842457,\n \"self-harm\": 1.5236925e-08,\n \"sexual/minors\": 5.7246268e-08,\n \"hate/threatening\": 0.0060676364,\n \"violence/graphic\": 4.435014e-06,\n \"self-harm/intent\": 8.098441e-10,\n \"self-harm/instructions\": 2.8498655e-11,\n \"harassment/threatening\": 0.63055265,\n \"violence\": 0.99011886,\n }\n }\n ]\n}\n" - /assistants: + - title: Single string + request: + curl: "curl https://api.openai.com/v1/moderations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"input\": \"I want to kill them.\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmoderation = client.moderations.create(input=\"I want to kill them.\")\nprint(moderation)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const moderation = await openai.moderations.create({ input: \"I want to kill them.\" });\n\n console.log(moderation);\n}\nmain();\n" + response: "{\n \"id\": \"modr-AB8CjOTu2jiq12hp1AQPfeqFWaORR\",\n \"model\": \"text-moderation-007\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"sexual\": false,\n \"hate\": false,\n \"harassment\": true,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"hate/threatening\": false,\n \"violence/graphic\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"harassment/threatening\": true,\n \"violence\": true\n },\n \"category_scores\": {\n \"sexual\": 0.000011726012417057063,\n \"hate\": 0.22706663608551025,\n \"harassment\": 0.5215635299682617,\n \"self-harm\": 2.227119921371923e-6,\n \"sexual/minors\": 7.107352217872176e-8,\n \"hate/threatening\": 0.023547329008579254,\n \"violence/graphic\": 0.00003391829886822961,\n \"self-harm/intent\": 1.646940972932498e-6,\n \"self-harm/instructions\": 1.1198755256458526e-9,\n \"harassment/threatening\": 0.5694745779037476,\n \"violence\": 0.9971134662628174\n }\n }\n ]\n}\n" + - title: Image and text + request: + curl: "curl https://api.openai.com/v1/moderations \\\n -X POST \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"omni-moderation-latest\",\n \"input\": [\n { \"type\": \"text\", \"text\": \"...text to classify goes here...\" },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://example.com/image.png\"\n }\n 
}\n ]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.moderations.create(\n model=\"omni-moderation-latest\",\n input=[\n {\"type\": \"text\", \"text\": \"...text to classify goes here...\"},\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://example.com/image.png\",\n # can also use base64 encoded image URLs\n # \"url\": \"data:image/jpeg;base64,abcdefg...\"\n }\n },\n ],\n)\n\nprint(response)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nconst moderation = await openai.moderations.create({\n model: \"omni-moderation-latest\",\n input: [\n { type: \"text\", text: \"...text to classify goes here...\" },\n {\n type: \"image_url\",\n image_url: {\n url: \"https://example.com/image.png\"\n // can also use base64 encoded image URLs\n // url: \"data:image/jpeg;base64,abcdefg...\"\n }\n }\n ],\n});\n\nconsole.log(moderation);\n" + response: "{\n \"id\": \"modr-0d9740456c391e43c445bf0f010940c7\",\n \"model\": \"omni-moderation-latest\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"harassment\": true,\n \"harassment/threatening\": true,\n \"sexual\": false,\n \"hate\": false,\n \"hate/threatening\": false,\n \"illicit\": false,\n \"illicit/violent\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"violence\": true,\n \"violence/graphic\": true\n },\n \"category_scores\": {\n \"harassment\": 0.8189693396524255,\n \"harassment/threatening\": 0.804985420696006,\n \"sexual\": 1.573112165348997e-6,\n \"hate\": 0.007562942636942845,\n \"hate/threatening\": 0.004208854591835476,\n \"illicit\": 0.030535955153511665,\n \"illicit/violent\": 0.008925306722380033,\n \"self-harm/intent\": 0.00023023930975076432,\n \"self-harm/instructions\": 0.0002293869201073356,\n \"self-harm\": 0.012598046106750154,\n \"sexual/minors\": 2.212566909570261e-8,\n \"violence\": 0.9999992735124786,\n \"violence/graphic\": 0.843064871157054\n },\n \"category_applied_input_types\": {\n \"harassment\": [\n \"text\"\n ],\n \"harassment/threatening\": [\n \"text\"\n ],\n \"sexual\": [\n \"text\",\n \"image\"\n ],\n \"hate\": [\n \"text\"\n ],\n \"hate/threatening\": [\n \"text\"\n ],\n \"illicit\": [\n \"text\"\n ],\n \"illicit/violent\": [\n \"text\"\n ],\n \"self-harm/intent\": [\n \"text\",\n \"image\"\n ],\n \"self-harm/instructions\": [\n \"text\",\n \"image\"\n ],\n \"self-harm\": [\n \"text\",\n \"image\"\n ],\n \"sexual/minors\": [\n \"text\"\n ],\n \"violence\": [\n \"text\",\n \"image\"\n ],\n \"violence/graphic\": [\n \"text\",\n \"image\"\n ]\n }\n }\n ]\n}\n" + /organization/audit_logs: get: tags: - - Assistants - summary: Returns a list of assistants. - operationId: listAssistants + - Audit Logs + summary: List user actions and configuration changes within this organization. + operationId: list-audit-logs parameters: + - name: effective_at + in: query + description: Return only events whose `effective_at` (Unix seconds) is in this range. + schema: + type: object + properties: + gt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than this value. + gte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. + lt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than this value. 
+ lte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. + - name: 'project_ids[]' + in: query + description: Return only events for these projects. + schema: + type: array + items: + type: string + - name: 'event_types[]' + in: query + description: 'Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object).' + schema: + type: array + items: + $ref: '#/components/schemas/AuditLogEventType' + - name: 'actor_ids[]' + in: query + description: 'Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID.' + schema: + type: array + items: + type: string + - name: 'actor_emails[]' + in: query + description: Return only events performed by users with these emails. + schema: + type: array + items: + type: string + - name: 'resource_ids[]' + in: query + description: 'Return only events performed on these targets. For example, a project ID updated.' + schema: + type: array + items: + type: string - name: limit in: query description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" schema: type: integer default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" @@ -967,303 +1244,359 @@ paths: type: string - name: before in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string responses: '200': - description: OK + description: Audit logs listed successfully. content: application/json: schema: - $ref: '#/components/schemas/ListAssistantsResponse' + $ref: '#/components/schemas/ListAuditLogsResponse' x-oaiMeta: - name: List assistants - group: assistants - beta: true - returns: 'A list of [assistant](/docs/api-reference/assistants/object) objects.' + name: List audit logs + group: audit-logs + returns: 'A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects.' 
examples: request: - curl: "curl \"https://api.openai.com/v1/assistants?order=desc&limit=20\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistants = client.beta.assistants.list(\n order=\"desc\",\n limit=\"20\",\n)\nprint(my_assistants.data)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistants = await openai.beta.assistants.list({\n order: \"desc\",\n limit: \"20\",\n });\n\n console.log(myAssistants.data);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" - post: + curl: "curl https://api.openai.com/v1/organization/audit_logs \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"audit_log-xxx_yyyymmdd\",\n \"type\": \"project.archived\",\n \"effective_at\": 1722461446,\n \"actor\": {\n \"type\": \"api_key\",\n \"api_key\": {\n \"type\": \"user\",\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n }\n }\n },\n \"project.archived\": {\n \"id\": \"proj_abc\"\n },\n },\n {\n \"id\": \"audit_log-yyy__20240101\",\n \"type\": \"api_key.updated\",\n \"effective_at\": 1720804190,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.updated\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource_2.operation_2\"]\n }\n },\n }\n ],\n \"first_id\": \"audit_log-xxx__20240101\",\n \"last_id\": \"audit_log_yyy__20240101\",\n \"has_more\": true\n}\n" + /organization/costs: + get: tags: - - Assistants - summary: Create an assistant with a model and instructions. - operationId: createAssistant - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAssistantRequest' - required: true + - Usage + summary: Get costs details for the organization. 
+ operationId: usage-costs + parameters: + - name: start_time + in: query + description: 'Start time (Unix seconds) of the query time range, inclusive.' + required: true + schema: + type: integer + - name: end_time + in: query + description: 'End time (Unix seconds) of the query time range, exclusive.' + schema: + type: integer + - name: bucket_width + in: query + description: 'Width of each time bucket in response. Currently only `1d` is supported, default to `1d`.' + schema: + enum: + - 1d + type: string + default: 1d + - name: project_ids + in: query + description: Return only costs for these projects. + schema: + type: array + items: + type: string + - name: group_by + in: query + description: 'Group the costs by the specified fields. Support fields include `project_id`, `line_item` and any combination of them.' + schema: + type: array + items: + enum: + - project_id + - line_item + type: string + - name: limit + in: query + description: "A limit on the number of buckets to be returned. Limit can range between 1 and 180, and the default is 7.\n" + schema: + type: integer + default: 7 + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. + schema: + type: string responses: '200': - description: OK + description: Costs data retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/AssistantObject' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: Create assistant - group: assistants - beta: true - returns: 'An [assistant](/docs/api-reference/assistants/object) object.' + name: Costs + group: usage-costs + returns: 'A list of paginated, time bucketed [Costs](/docs/api-reference/usage/costs_object) objects.' examples: - - title: Code Interpreter - request: - curl: "curl \"https://api.openai.com/v1/assistants\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"name\": \"Math Tutor\",\n \"tools\": [{\"type\": \"code_interpreter\"}],\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n name=\"Math Tutor\",\n tools=[{\"type\": \"code_interpreter\"}],\n model=\"gpt-4o\",\n)\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n name: \"Math Tutor\",\n tools: [{ type: \"code_interpreter\" }],\n model: \"gpt-4o\",\n });\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - - title: Files - request: - curl: "curl https://api.openai.com/v1/assistants \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"tool_resources\": {\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n tool_resources={\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n model=\"gpt-4o\"\n)\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n tool_resources: {\n file_search: {\n vector_store_ids: [\"vs_123\"]\n }\n },\n model: \"gpt-4o\"\n });\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009403,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - '/assistants/{assistant_id}': + request: + curl: "curl \"https://api.openai.com/v1/organization/costs?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"orgainzation.costs.result\",\n \"amount\": {\n \"value\": 0.06,\n \"currency\": \"usd\"\n },\n \"line_item\": null,\n \"project_id\": null\n }\n ]\n }\n ],\n \"has_more\": false,\n \"next_page\": null\n}\n" + /organization/invites: get: tags: - - Assistants - summary: Retrieves an assistant. - operationId: getAssistant + - Invites + summary: Returns a list of invites in the organization. + operationId: list-invites parameters: - - name: assistant_id - in: path - description: The ID of the assistant to retrieve. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string responses: '200': - description: OK + description: Invites listed successfully. content: application/json: schema: - $ref: '#/components/schemas/AssistantObject' + $ref: '#/components/schemas/InviteListResponse' x-oaiMeta: - name: Retrieve assistant - group: assistants - beta: true - returns: 'The [assistant](/docs/api-reference/assistants/object) object matching the specified ID.' + name: List invites + group: administration + returns: 'A list of [Invite](/docs/api-reference/invite/object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.retrieve(\"asst_abc123\")\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.retrieve(\n \"asst_abc123\"\n );\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + curl: "curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n }\n ],\n \"first_id\": \"invite-abc\",\n \"last_id\": \"invite-abc\",\n \"has_more\": false\n}\n" post: tags: - - Assistants - summary: Modifies an assistant. - operationId: modifyAssistant - parameters: - - name: assistant_id - in: path - description: The ID of the assistant to modify. - required: true - schema: - type: string + - Invites + summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. + operationId: inviteUser requestBody: + description: The invite request payload. content: application/json: schema: - $ref: '#/components/schemas/ModifyAssistantRequest' + $ref: '#/components/schemas/InviteRequest' required: true responses: '200': - description: OK + description: User invited successfully. content: application/json: schema: - $ref: '#/components/schemas/AssistantObject' + $ref: '#/components/schemas/Invite' x-oaiMeta: - name: Modify assistant - group: assistants - beta: true - returns: 'The modified [assistant](/docs/api-reference/assistants/object) object.' + name: Create invite + group: administration + returns: 'The created [Invite](/docs/api-reference/invite/object) object.' 
examples: request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_assistant = client.beta.assistants.update(\n \"asst_abc123\",\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n model=\"gpt-4o\"\n)\n\nprint(my_updated_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myUpdatedAssistant = await openai.beta.assistants.update(\n \"asst_abc123\",\n {\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n model: \"gpt-4o\"\n }\n );\n\n console.log(myUpdatedAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": []\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - delete: + curl: "curl -X POST https://api.openai.com/v1/organization/invites \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"email\": \"user@example.com\",\n \"role\": \"owner\"\n }'\n" + response: + content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": null\n}\n" + '/organization/invites/{invite_id}': + get: tags: - - Assistants - summary: Delete an assistant. - operationId: deleteAssistant + - Invites + summary: Retrieves an invite. + operationId: retrieve-invite parameters: - - name: assistant_id + - name: invite_id in: path - description: The ID of the assistant to delete. + description: The ID of the invite to retrieve. required: true schema: type: string responses: '200': - description: OK + description: Invite retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/DeleteAssistantResponse' + $ref: '#/components/schemas/Invite' x-oaiMeta: - name: Delete assistant - group: assistants - beta: true - returns: Deletion status + name: Retrieve invite + group: administration + returns: 'The [Invite](/docs/api-reference/invite/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.assistants.delete(\"asst_abc123\")\nprint(response)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.assistants.del(\"asst_abc123\");\n\n console.log(response);\n}\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant.deleted\",\n \"deleted\": true\n}\n" - /threads: - post: + curl: "curl https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" + delete: tags: - - Assistants - summary: Create a thread. - operationId: createThread - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateThreadRequest' + - Invites + summary: 'Delete an invite. If the invite has already been accepted, it cannot be deleted.' + operationId: delete-invite + parameters: + - name: invite_id + in: path + description: The ID of the invite to delete. + required: true + schema: + type: string responses: '200': - description: OK + description: Invite deleted successfully. content: application/json: schema: - $ref: '#/components/schemas/ThreadObject' + $ref: '#/components/schemas/InviteDeleteResponse' x-oaiMeta: - name: Create thread - group: threads - beta: true - returns: 'A [thread](/docs/api-reference/threads) object.' + name: Delete invite + group: administration + returns: Confirmation that the invite has been deleted examples: - - title: Empty - request: - curl: "curl https://api.openai.com/v1/threads \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d ''\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nempty_thread = client.beta.threads.create()\nprint(empty_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const emptyThread = await openai.beta.threads.create();\n\n console.log(emptyThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699012949,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" - - title: Messages - request: - curl: "curl https://api.openai.com/v1/threads \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-H \"OpenAI-Beta: assistants=v2\" \\\n-d '{\n \"messages\": [{\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n }, {\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage_thread = client.beta.threads.create(\n messages=[\n {\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n },\n {\n \"role\": \"user\",\n \"content\": \"How does AI work? 
Explain it in simple terms.\"\n },\n ]\n)\n\nprint(message_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messageThread = await openai.beta.threads.create({\n messages: [\n {\n role: \"user\",\n content: \"Hello, what is AI?\"\n },\n {\n role: \"user\",\n content: \"How does AI work? Explain it in simple terms.\",\n },\n ],\n });\n\n console.log(messageThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" - '/threads/{thread_id}': + request: + curl: "curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.invite.deleted\",\n \"id\": \"invite-abc\",\n \"deleted\": true\n}\n" + /organization/projects: get: tags: - - Assistants - summary: Retrieves a thread. - operationId: getThread + - Projects + summary: Returns a list of projects. + operationId: list-projects parameters: - - name: thread_id - in: path - description: The ID of the thread to retrieve. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string + - name: include_archived + in: query + description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. + schema: + type: boolean + default: false responses: '200': - description: OK + description: Projects listed successfully. content: application/json: schema: - $ref: '#/components/schemas/ThreadObject' + $ref: '#/components/schemas/ProjectListResponse' x-oaiMeta: - name: Retrieve thread - group: threads - beta: true - returns: 'The [thread](/docs/api-reference/threads/object) object matching the specified ID.' + name: List projects + group: administration + returns: 'A list of [Project](/docs/api-reference/projects/object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_thread = client.beta.threads.retrieve(\"thread_abc123\")\nprint(my_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myThread = await openai.beta.threads.retrieve(\n \"thread_abc123\"\n );\n\n console.log(myThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": []\n }\n }\n}\n" + curl: "curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n }\n ],\n \"first_id\": \"proj-abc\",\n \"last_id\": \"proj-xyz\",\n \"has_more\": false\n}\n" post: tags: - - Assistants - summary: Modifies a thread. - operationId: modifyThread - parameters: - - name: thread_id - in: path - description: The ID of the thread to modify. Only the `metadata` can be modified. - required: true - schema: - type: string + - Projects + summary: 'Create a new project in the organization. Projects can be created and archived, but cannot be deleted.' + operationId: create-project requestBody: + description: The project create request payload. content: application/json: schema: - $ref: '#/components/schemas/ModifyThreadRequest' + $ref: '#/components/schemas/ProjectCreateRequest' required: true responses: '200': - description: OK + description: Project created successfully. content: application/json: schema: - $ref: '#/components/schemas/ThreadObject' + $ref: '#/components/schemas/Project' x-oaiMeta: - name: Modify thread - group: threads - beta: true - returns: 'The modified [thread](/docs/api-reference/threads/object) object matching the specified ID.' + name: Create project + group: administration + returns: 'The created [Project](/docs/api-reference/projects/object) object.' 
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_thread = client.beta.threads.update(\n \"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n)\nprint(my_updated_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const updatedThread = await openai.beta.threads.update(\n \"thread_abc123\",\n {\n metadata: { modified: \"true\", user: \"abc123\" },\n }\n );\n\n console.log(updatedThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n },\n \"tool_resources\": {}\n}\n" - delete: + curl: "curl -X POST https://api.openai.com/v1/organization/projects \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project ABC\"\n }'\n" + response: + content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project ABC\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" + '/organization/projects/{project_id}': + get: tags: - - Assistants - summary: Delete a thread. - operationId: deleteThread + - Projects + summary: Retrieves a project. + operationId: retrieve-project parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to delete. + description: The ID of the project. required: true schema: type: string responses: '200': - description: OK + description: Project retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/DeleteThreadResponse' + $ref: '#/components/schemas/Project' x-oaiMeta: - name: Delete thread - group: threads - beta: true - returns: Deletion status - examples: + name: Retrieve project + group: administration + description: Retrieve a project. + returns: 'The [Project](/docs/api-reference/projects/object) object matching the specified ID.' + examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.threads.delete(\"thread_abc123\")\nprint(response)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.threads.del(\"thread_abc123\");\n\n console.log(response);\n}\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread.deleted\",\n \"deleted\": true\n}\n" - '/threads/{thread_id}/messages': + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" + post: + tags: + - Projects + summary: Modifies a project in the organization. 
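+      # Illustrative only: retrieving a project (operation above) and then renaming it via this
+      # modify operation, sketched with the `requests` package (an assumed HTTP client); both
+      # calls reuse the admin bearer token shown in the curl examples.
+      #
+      #   import os, requests
+      #
+      #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
+      #   base = "https://api.openai.com/v1/organization/projects/proj_abc"
+      #
+      #   project = requests.get(base, headers=headers).json()
+      #   renamed = requests.post(base, headers=headers, json={"name": "Project DEF"}).json()
+      #   print(project["name"], "->", renamed["name"])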
+ operationId: modify-project + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + requestBody: + description: The project update request payload. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUpdateRequest' + required: true + responses: + '200': + description: Project updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/Project' + '400': + description: Error response when updating the default project. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project + group: administration + returns: 'The updated [Project](/docs/api-reference/projects/object) object.' + examples: + request: + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project DEF\"\n }'\n" + '/organization/projects/{project_id}/api_keys': get: tags: - - Assistants - summary: Returns a list of messages for a given thread. - operationId: listMessages + - Projects + summary: Returns a list of API keys in the project. + operationId: list-project-api-keys parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) the messages belong to.' + description: The ID of the project. required: true schema: type: string @@ -1273,252 +1606,238 @@ paths: schema: type: integer default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: run_id - in: query - description: "Filter messages by the run ID that generated them.\n" - schema: - type: string responses: '200': - description: OK + description: Project API keys listed successfully. content: application/json: schema: - $ref: '#/components/schemas/ListMessagesResponse' + $ref: '#/components/schemas/ProjectApiKeyListResponse' x-oaiMeta: - name: List messages - group: threads - beta: true - returns: 'A list of [message](/docs/api-reference/messages) objects.' + name: List project API keys + group: administration + returns: 'A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects.' 
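+      # Sketch under the same assumptions as above (Python + `requests`, admin key in the
+      # environment): paging through a project's API keys with the `limit`/`after` cursor
+      # parameters documented for this operation.
+      #
+      #   import os, requests
+      #
+      #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
+      #   url = "https://api.openai.com/v1/organization/projects/proj_abc/api_keys"
+      #   after = None
+      #   while True:
+      #       params = {"limit": 20, **({"after": after} if after else {})}
+      #       page = requests.get(url, headers=headers, params=params).json()
+      #       for key in page["data"]:
+      #           print(key["id"], key["redacted_value"], key["name"])
+      #       if not page["has_more"]:
+      #           break
+      #       after = page["last_id"]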
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_messages = client.beta.threads.messages.list(\"thread_abc123\")\nprint(thread_messages.data)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.list(\n \"thread_abc123\"\n );\n\n console.log(threadMessages.data);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n },\n {\n \"id\": \"msg_abc456\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hello, what is AI?\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n }\n ],\n \"first_id\": \"msg_abc123\",\n \"last_id\": \"msg_abc456\",\n \"has_more\": false\n}\n" - post: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n }\n ],\n \"first_id\": \"key_abc\",\n \"last_id\": \"key_xyz\",\n \"has_more\": false\n}\n" + error_response: + content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" + '/organization/projects/{project_id}/api_keys/{key_id}': + get: tags: - - Assistants - summary: Create a message. - operationId: createMessage + - Projects + summary: Retrieves an API key in the project. + operationId: retrieve-project-api-key parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to create a message for.' + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateMessageRequest' - required: true responses: '200': - description: OK + description: Project API key retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/MessageObject' + $ref: '#/components/schemas/ProjectApiKey' x-oaiMeta: - name: Create message - group: threads - beta: true - returns: 'A [message](/docs/api-reference/messages/object) object.' 
+ name: Retrieve project API key + group: administration + returns: 'The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_message = client.beta.threads.messages.create(\n \"thread_abc123\",\n role=\"user\",\n content=\"How does AI work? Explain it in simple terms.\",\n)\nprint(thread_message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.create(\n \"thread_abc123\",\n { role: \"user\", content: \"How does AI work? Explain it in simple terms.\" }\n );\n\n console.log(threadMessages);\n}\n\nmain();" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1713226573,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" - '/threads/{thread_id}/messages/{message_id}': - get: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" + delete: tags: - - Assistants - summary: Retrieve a message. - operationId: getMessage + - Projects + summary: Deletes an API key from the project. + operationId: delete-project-api-key parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to which this message belongs.' + description: The ID of the project. required: true schema: type: string - - name: message_id + - name: key_id in: path - description: The ID of the message to retrieve. + description: The ID of the API key. required: true schema: type: string responses: '200': - description: OK + description: Project API key deleted successfully. content: application/json: schema: - $ref: '#/components/schemas/MessageObject' + $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' + '400': + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Retrieve message - group: threads - beta: true - returns: 'The [message](/docs/api-reference/messages/object) object matching the specified ID.' 
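+      # A hedged example of the delete operation defined above, again via Python `requests`;
+      # note the documented 400 response when the key belongs to a service account, which
+      # surfaces here as an HTTPError.
+      #
+      #   import os, requests
+      #
+      #   resp = requests.delete(
+      #       "https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc",
+      #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+      #   )
+      #   resp.raise_for_status()  # a 400 here means the key belongs to a service account
+      #   print(resp.json())  # {"object": "organization.project.api_key.deleted", ...}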
+ name: Delete project API key + group: administration + returns: Confirmation of the key's deletion or an error if the key belonged to a service account examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.retrieve(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n)\nprint(message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.retrieve(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(message);\n}\n\nmain();" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" + curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.api_key.deleted\",\n \"id\": \"key_abc\",\n \"deleted\": true\n}\n" + error_response: + content: "{\n \"code\": 400,\n \"message\": \"API keys cannot be deleted for service accounts, please delete the service account\"\n}\n" + '/organization/projects/{project_id}/archive': post: tags: - - Assistants - summary: Modifies a message. - operationId: modifyMessage + - Projects + summary: Archives a project in the organization. Archived projects cannot be used or updated. + operationId: archive-project parameters: - - name: thread_id - in: path - description: The ID of the thread to which this message belongs. - required: true - schema: - type: string - - name: message_id + - name: project_id in: path - description: The ID of the message to modify. + description: The ID of the project. required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyMessageRequest' - required: true responses: '200': - description: OK + description: Project archived successfully. content: application/json: schema: - $ref: '#/components/schemas/MessageObject' + $ref: '#/components/schemas/Project' x-oaiMeta: - name: Modify message - group: threads - beta: true - returns: 'The modified [message](/docs/api-reference/messages/object) object.' + name: Archive project + group: administration + returns: 'The archived [Project](/docs/api-reference/projects/object) object.' 
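+      # Non-normative Python sketch of archiving a project (a POST with no request body),
+      # assuming the `requests` package; per the summary above, an archived project can no
+      # longer be used or updated.
+      #
+      #   import os, requests
+      #
+      #   resp = requests.post(
+      #       "https://api.openai.com/v1/organization/projects/proj_abc/archive",
+      #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+      #   )
+      #   resp.raise_for_status()
+      #   archived = resp.json()
+      #   print(archived["status"], archived["archived_at"])  # "archived", unix timestamp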
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.update(\n message_id=\"msg_abc12\",\n thread_id=\"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\",\n },\n)\nprint(message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.update(\n \"thread_abc123\",\n \"msg_abc123\",\n {\n metadata: {\n modified: \"true\",\n user: \"abc123\",\n },\n }\n }'" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"file_ids\": [],\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n}\n" - delete: + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project DEF\",\n \"created_at\": 1711471533,\n \"archived_at\": 1711471533,\n \"status\": \"archived\"\n}\n" + '/organization/projects/{project_id}/rate_limits': + get: tags: - - Assistants - summary: Deletes a message. - operationId: deleteMessage + - Projects + summary: Returns the rate limits per model for a project. + operationId: list-project-rate-limits parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to which this message belongs. + description: The ID of the project. required: true schema: type: string - - name: message_id - in: path - description: The ID of the message to delete. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. The default is 100.\n" + schema: + type: integer + default: 100 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, beginning with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string responses: '200': - description: OK + description: Project rate limits listed successfully. 
content: application/json: schema: - $ref: '#/components/schemas/DeleteMessageResponse' + $ref: '#/components/schemas/ProjectRateLimitListResponse' x-oaiMeta: - name: Delete message - group: threads - beta: true - returns: Deletion status + name: List project rate limits + group: administration + returns: 'A list of [ProjectRateLimit](/docs/api-reference/project-rate-limits/object) objects.' examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_message = client.beta.threads.messages.delete(\n message_id=\"msg_abc12\",\n thread_id=\"thread_abc123\",\n)\nprint(deleted_message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedMessage = await openai.beta.threads.messages.del(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(deletedMessage);\n}" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message.deleted\",\n \"deleted\": true\n}\n" - /threads/runs: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/rate_limits?after=rl_xxx&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"project.rate_limit\",\n \"id\": \"rl-ada\",\n \"model\": \"ada\",\n \"max_requests_per_1_minute\": 600,\n \"max_tokens_per_1_minute\": 150000,\n \"max_images_per_1_minute\": 10\n }\n ],\n \"first_id\": \"rl-ada\",\n \"last_id\": \"rl-ada\",\n \"has_more\": false\n}\n" + error_response: "{\n \"code\": 404,\n \"message\": \"The project {project_id} was not found\"\n}\n" + '/organization/projects/{project_id}/rate_limits/{rate_limit_id}': post: tags: - - Assistants - summary: Create a thread and run it in one request. - operationId: createThreadAndRun + - Projects + summary: Updates a project rate limit. + operationId: update-project-rate-limits + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: rate_limit_id + in: path + description: The ID of the rate limit. + required: true + schema: + type: string requestBody: + description: The project rate limit update request payload. content: application/json: schema: - $ref: '#/components/schemas/CreateThreadAndRunRequest' + $ref: '#/components/schemas/ProjectRateLimitUpdateRequest' required: true responses: '200': - description: OK + description: Project rate limit updated successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectRateLimit' + '400': + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Create thread and run - group: threads - beta: true - returns: 'A [run](/docs/api-reference/runs/object) object.' + name: Modify project rate limit + group: administration + returns: 'The updated [ProjectRateLimit](/docs/api-reference/project-rate-limits/object) object.' 
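+      # Hedged sketch combining the two rate-limit operations (the list operation above and
+      # this update operation), with Python `requests` as an assumed client: list the per-model
+      # limits for a project, then lower one model's request ceiling by POSTing only the field
+      # being changed, as in the curl example below.
+      #
+      #   import os, requests
+      #
+      #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
+      #   base = "https://api.openai.com/v1/organization/projects/proj_abc/rate_limits"
+      #
+      #   limits = requests.get(base, headers=headers, params={"limit": 100}).json()["data"]
+      #   first = limits[0]["id"]  # e.g. "rl-ada"
+      #   updated = requests.post(f"{base}/{first}", headers=headers,
+      #                           json={"max_requests_per_1_minute": 500}).json()
+      #   print(updated["model"], updated["max_requests_per_1_minute"])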
examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.create_and_run(\n assistant_id=\"asst_abc123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_abc123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Explain deep learning to a 5 year old.\" },\n ],\n },\n });\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076792,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": null,\n \"expires_at\": 1699077392,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"required_action\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant.\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_completion_tokens\": null,\n \"max_prompt_tokens\": null,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"incomplete_details\": null,\n \"usage\": null,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.create_and_run(\n assistant_id=\"asst_123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Hello\" },\n ],\n },\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710348075,\"metadata\":{}}\n\nevent: thread.run.created\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! How can I assist you today?\",\"annotations\":[]}}], \"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\n{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1713226836,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1713226837,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: done\ndata: [DONE]\n" - - title: Streaming with Functions - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.create_and_run(\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"What is the weather like in San Francisco?\" },\n ],\n },\n tools: tools,\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710351818,\"metadata\":{}}\n\nevent: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"\",\"output\":null}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"{\\\"\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"location\"}}]}}}\n\n...\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"ahrenheit\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"\\\"}\"}}]}}}\n\nevent: thread.run.requires_action\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"requires_action\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":{\"type\":\"submit_tool_outputs\",\"submit_tool_outputs\":{\"tool_calls\":[{\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\"}}]}},\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given 
location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs': + request: + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/rate_limits/rl_xxx \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"max_requests_per_1_minute\": 500\n }'\n" + response: "{\n \"object\": \"project.rate_limit\",\n \"id\": \"rl-ada\",\n \"model\": \"ada\",\n \"max_requests_per_1_minute\": 600,\n \"max_tokens_per_1_minute\": 150000,\n \"max_images_per_1_minute\": 10\n }\n" + error_response: "{\n \"code\": 404,\n \"message\": \"The project {project_id} was not found\"\n}\n" + '/organization/projects/{project_id}/service_accounts': get: tags: - - Assistants - summary: Returns a list of runs belonging to a thread. - operationId: listRuns + - Projects + summary: Returns a list of service accounts in the project. + operationId: list-project-service-accounts parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread the run belongs to. + description: The ID of the project. required: true schema: type: string @@ -1528,1550 +1847,1844 @@ paths: schema: type: integer default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string responses: '200': - description: OK + description: Project service accounts listed successfully. content: application/json: schema: - $ref: '#/components/schemas/ListRunsResponse' + $ref: '#/components/schemas/ProjectServiceAccountListResponse' + '400': + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: List runs - group: threads - beta: true - returns: 'A list of [run](/docs/api-reference/runs/object) objects.' + name: List project service accounts + group: administration + returns: 'A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects.' 
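+      # Companion sketch for the operation above (Python `requests`, assumed as before):
+      # listing a project's service accounts and their roles.
+      #
+      #   import os, requests
+      #
+      #   resp = requests.get(
+      #       "https://api.openai.com/v1/organization/projects/proj_abc/service_accounts",
+      #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+      #       params={"limit": 20},
+      #   )
+      #   resp.raise_for_status()
+      #   for account in resp.json()["data"]:
+      #       print(account["id"], account["name"], account["role"])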
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nruns = client.beta.threads.runs.list(\n \"thread_abc123\"\n)\n\nprint(runs)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const runs = await openai.beta.threads.runs.list(\n \"thread_abc123\"\n );\n\n console.log(runs);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n },\n {\n \"id\": \"run_abc456\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n }\n ],\n \"first_id\": \"run_abc123\",\n \"last_id\": \"run_abc456\",\n \"has_more\": false\n}\n" + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n ],\n \"first_id\": \"svc_acct_abc\",\n \"last_id\": \"svc_acct_xyz\",\n \"has_more\": false\n}\n" post: tags: - - Assistants - summary: Create a run. - operationId: createRun + - Projects + summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. 
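+      # Hedged sketch for this create operation; the request body is assumed to carry just a
+      # `name` field (ProjectServiceAccountCreateRequest is referenced below, but its fields
+      # are not shown in this hunk), and, as the summary above notes, the response includes an
+      # unredacted API key for the new service account, so treat it as a secret.
+      #
+      #   import os, requests
+      #
+      #   resp = requests.post(
+      #       "https://api.openai.com/v1/organization/projects/proj_abc/service_accounts",
+      #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+      #       json={"name": "Production backend"},  # assumed payload shape
+      #   )
+      #   resp.raise_for_status()
+      #   print(resp.json())  # includes the service account and its unredacted API key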
+ operationId: create-project-service-account parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to run. + description: The ID of the project. required: true schema: type: string requestBody: + description: The project service account create request payload. content: application/json: schema: - $ref: '#/components/schemas/CreateRunRequest' + $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' required: true responses: '200': - description: OK + description: Project service account created successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' + '400': + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Create run - group: threads - beta: true - returns: 'A [run](/docs/api-reference/runs/object) object.' + name: Create project service account + group: administration + returns: 'The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object.' examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n { assistant_id: \"asst_abc123\" }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_123\",\n assistant_id=\"asst_123\",\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_123\",\n { assistant_id: \"asst_123\", stream: true }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - 
response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710330641,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710330642,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710330642,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710330641,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710330642,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - - title: Streaming with Functions - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n {\n assistant_id: \"asst_abc123\",\n tools: tools,\n stream: true\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710348075,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710348075,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710348077,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs/{run_id}': + request: + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Production App\"\n }'\n" + response: + content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Production App\",\n \"role\": \"member\",\n \"created_at\": 1711471533,\n \"api_key\": {\n \"object\": \"organization.project.service_account.api_key\",\n \"value\": \"sk-abcdefghijklmnop123\",\n \"name\": \"Secret Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\"\n }\n}\n" + '/organization/projects/{project_id}/service_accounts/{service_account_id}': get: tags: - - Assistants - summary: Retrieves a run. - operationId: getRun + - Projects + summary: Retrieves a service account in the project. + operationId: retrieve-project-service-account parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' + description: The ID of the project. required: true schema: type: string - - name: run_id + - name: service_account_id in: path - description: The ID of the run to retrieve. + description: The ID of the service account. required: true schema: type: string responses: '200': - description: OK + description: Project service account retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectServiceAccount' x-oaiMeta: - name: Retrieve run - group: threads - beta: true - returns: 'The [run](/docs/api-reference/runs/object) object matching the specified ID.' + name: Retrieve project service account + group: administration + returns: 'The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID.' 
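The project service account endpoints introduced above are documented only with curl. As a minimal Python sketch, assuming the `requests` library and an admin key exported as OPENAI_ADMIN_KEY (as in the curl examples), creating and then retrieving a service account might look like this; the project ID is the placeholder from the spec's own examples, not a real value.

```python
# Minimal sketch (not part of the spec): create and then retrieve a project
# service account. `requests`, the variable names and the placeholder IDs are
# assumptions; URLs, headers and payloads follow the endpoints above.
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {
    "Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
    "Content-Type": "application/json",
}
project_id = "proj_abc"  # placeholder ID from the spec's examples

# POST /organization/projects/{project_id}/service_accounts
created = requests.post(
    f"{BASE}/organization/projects/{project_id}/service_accounts",
    headers=HEADERS,
    json={"name": "Production App"},
).json()
print(created["id"], created["api_key"]["value"])  # an api_key object is returned on creation

# GET /organization/projects/{project_id}/service_accounts/{service_account_id}
svc = requests.get(
    f"{BASE}/organization/projects/{project_id}/service_accounts/{created['id']}",
    headers=HEADERS,
).json()
print(svc["name"], svc["role"])
```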
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.retrieve(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - post: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" + delete: tags: - - Assistants - summary: Modifies a run. - operationId: modifyRun + - Projects + summary: Deletes a service account from the project. + operationId: delete-project-service-account parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' + description: The ID of the project. required: true schema: type: string - - name: run_id + - name: service_account_id in: path - description: The ID of the run to modify. + description: The ID of the service account. required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyRunRequest' - required: true responses: '200': - description: OK + description: Project service account deleted successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' x-oaiMeta: - name: Modify run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
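For completeness, the matching delete call for the service-account path defined here, again a hedged sketch using `requests` and the spec's placeholder IDs rather than anything the spec prescribes:

```python
# Minimal sketch (not part of the spec): delete a project service account.
import os
import requests

resp = requests.delete(
    "https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
)
print(resp.json()["deleted"])  # True once the service account has been removed
```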
+ name: Delete project service account + group: administration + returns: 'Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts' examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.update(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n metadata={\"user_id\": \"user_abc123\"},\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.update(\n \"thread_abc123\",\n \"run_abc123\",\n {\n metadata: {\n user_id: \"user_abc123\",\n },\n }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - '/threads/{thread_id}/runs/{run_id}/submit_tool_outputs': - post: + curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.service_account.deleted\",\n \"id\": \"svc_acct_abc\",\n \"deleted\": true\n}\n" + '/organization/projects/{project_id}/users': + get: tags: - - Assistants - summary: "When a run has the `status: \"requires_action\"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request.\n" - operationId: submitToolOuputsToRun + - Projects + summary: Returns a list of users in the project. + operationId: list-project-users parameters: - - name: thread_id + - name: project_id in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to which this run belongs.' + description: The ID of the project. required: true schema: type: string - - name: run_id - in: path - description: The ID of the run that requires the tool output submission. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the default is 20.\n" schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SubmitToolOutputsRunRequest' - required: true + type: integer + default: 20 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string responses: '200': - description: OK + description: Project users listed successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectUserListResponse' + '400': + description: Error response when project is archived. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Submit tool outputs to run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' + name: List project users + group: administration + returns: 'A list of [ProjectUser](/docs/api-reference/project-users/object) objects.' examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075592,\n \"assistant_id\": \"asst_123\",\n \"thread_id\": \"thread_123\",\n \"status\": \"queued\",\n \"started_at\": 1699075592,\n \"expires_at\": 1699076192,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710352449,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352475,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"id\":\"call_iWr0kQ2EaYMaxNdl0v3KYkx7\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\",\"output\":\"70 degrees and sunny.\"}}]},\"usage\":{\"prompt_tokens\":291,\"completion_tokens\":24,\"total_tokens\":315}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":1710352448,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710352475,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"The\",\"annotations\":[]}}]}}\n\nevent: thread.message.delta\ndata: 
{\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" current\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" weather\"}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" sunny\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\".\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710352477,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352477,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":{\"prompt_tokens\":329,\"completion_tokens\":18,\"total_tokens\":347}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710352475,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710352477,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs/{run_id}/cancel': + request: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" + error_response: + content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" post: tags: - - Assistants - summary: Cancels a run that is `in_progress`. - operationId: cancelRun + - Projects + summary: Adds a user to the project. Users must already be members of the organization to be added to a project. + operationId: create-project-user parameters: - - name: thread_id - in: path - description: The ID of the thread to which this run belongs. - required: true - schema: - type: string - - name: run_id + - name: project_id in: path - description: The ID of the run to cancel. + description: The ID of the project. required: true schema: type: string + requestBody: + description: The project user create request payload. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserCreateRequest' + required: true responses: '200': - description: OK + description: User added to project successfully. content: application/json: schema: - $ref: '#/components/schemas/RunObject' + $ref: '#/components/schemas/ProjectUser' + '400': + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Cancel a run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' + name: Create project user + group: administration + returns: 'The created [ProjectUser](/docs/api-reference/project-users/object) object.' 
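The list and create project-user endpoints above can be exercised the same way. The sketch below assumes `requests` and the placeholder IDs from the examples; the `after` cursor handling follows the pagination description (pass the ID of the last object from the previous page), and the request payload mirrors the curl example that follows.

```python
# Minimal sketch (not part of the spec): page through project users with
# `limit` + `after`, then add an existing organization member to the project.
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {
    "Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
    "Content-Type": "application/json",
}
project_id = "proj_abc"  # placeholder from the spec's examples

users, after = [], None
while True:
    params = {"limit": 20}            # 1-100, default 20
    if after:
        params["after"] = after       # cursor: ID of the last object already seen
    page = requests.get(
        f"{BASE}/organization/projects/{project_id}/users",
        headers=HEADERS,
        params=params,
    ).json()
    users.extend(page["data"])
    if not page["has_more"]:
        break
    after = page["last_id"]

added = requests.post(
    f"{BASE}/organization/projects/{project_id}/users",
    headers=HEADERS,
    json={"user_id": "user_abc", "role": "member"},  # user must already be an org member
).json()
print(len(users), added["id"], added["role"])
```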
examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.cancel(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.cancel(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076126,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"cancelling\",\n \"started_at\": 1699076126,\n \"expires_at\": 1699076726,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You summarize books.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - '/threads/{thread_id}/runs/{run_id}/steps': + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"user_id\": \"user_abc\",\n \"role\": \"member\"\n }'\n" + response: + content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + error_response: + content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" + '/organization/projects/{project_id}/users/{user_id}': get: tags: - - Assistants - summary: Returns a list of run steps belonging to a run. - operationId: listRunSteps + - Projects + summary: Retrieves a user in the project. + operationId: retrieve-project-user parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread the run and run steps belong to. + description: The ID of the project. required: true schema: type: string - - name: run_id + - name: user_id in: path - description: The ID of the run the run steps belong to. + description: The ID of the user. required: true schema: type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string responses: '200': - description: OK + description: Project user retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/ListRunStepsResponse' + $ref: '#/components/schemas/ProjectUser' x-oaiMeta: - name: List run steps - group: threads - beta: true - returns: 'A list of [run step](/docs/api-reference/runs/step-object) objects.' + name: Retrieve project user + group: administration + returns: 'The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_steps = client.beta.threads.runs.steps.list(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run_steps)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.list(\n \"thread_abc123\",\n \"run_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n }\n ],\n \"first_id\": \"step_abc123\",\n \"last_id\": \"step_abc456\",\n \"has_more\": false\n}\n" - '/threads/{thread_id}/runs/{run_id}/steps/{step_id}': - get: + curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + post: tags: - - Assistants - summary: Retrieves a run step. - operationId: getRunStep + - Projects + summary: Modifies a user's role in the project. + operationId: modify-project-user parameters: - - name: thread_id + - name: project_id in: path - description: The ID of the thread to which the run and run step belongs. + description: The ID of the project. required: true schema: type: string - - name: run_id + - name: user_id in: path - description: The ID of the run to which the run step belongs. + description: The ID of the user. required: true schema: type: string - - name: step_id + requestBody: + description: The project user update request payload. + content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUserUpdateRequest' + required: true + responses: + '200': + description: Project user's role updated successfully. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ProjectUser' + '400': + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + x-oaiMeta: + name: Modify project user + group: administration + returns: 'The updated [ProjectUser](/docs/api-reference/project-users/object) object.' + examples: + request: + curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" + response: + content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + delete: + tags: + - Projects + summary: Deletes a user from the project. + operationId: delete-project-user + parameters: + - name: project_id in: path - description: The ID of the run step to retrieve. + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. required: true schema: type: string responses: '200': - description: OK + description: Project user deleted successfully. content: application/json: schema: - $ref: '#/components/schemas/RunStepObject' + $ref: '#/components/schemas/ProjectUserDeleteResponse' + '400': + description: Error response for various conditions. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' x-oaiMeta: - name: Retrieve run step - group: threads - beta: true - returns: 'The [run step](/docs/api-reference/runs/step-object) object matching the specified ID.' + name: Delete project user + group: administration + returns: 'Confirmation that project has been deleted or an error in case of an archived project, which has no users' examples: request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_step = client.beta.threads.runs.steps.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n step_id=\"step_abc123\"\n)\n\nprint(run_step)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.retrieve(\n \"thread_abc123\",\n \"run_abc123\",\n \"step_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" - response: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" - /vector_stores: + curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: 
"{\n \"object\": \"organization.project.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" + /organization/usage/audio_speeches: get: tags: - - Vector Stores - summary: Returns a list of vector stores. - operationId: listVectorStores + - Usage + summary: Get audio speeches usage details for the organization. + operationId: usage-audio-speeches parameters: - - name: limit + - name: start_time in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + description: 'Start time (Unix seconds) of the query time range, inclusive.' + required: true schema: type: integer - default: 20 - - name: order + - name: end_time in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + description: 'End time (Unix seconds) of the query time range, exclusive.' schema: - enum: - - asc - - desc - type: string - default: desc - - name: after + type: integer + - name: bucket_width in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + description: 'Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, default to `1d`.' schema: + enum: + - 1m + - 1h + - 1d type: string - - name: before + default: 1d + - name: project_ids in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: Return only usage for these projects. + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + schema: + type: array + items: + type: string + - name: group_by + in: query + description: 'Group the usage data by the specified fields. Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.' + schema: + type: array + items: + enum: + - project_id + - user_id + - api_key_id + - model + type: string + - name: limit + in: query + description: "Specifies the number of buckets to return.\n- `bucket_width=1d`: default: 7, max: 31\n- `bucket_width=1h`: default: 24, max: 168\n- `bucket_width=1m`: default: 60, max: 1440\n" + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. schema: type: string responses: '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListVectorStoresResponse' - x-oaiMeta: - name: List vector stores - group: vector_stores - beta: true - returns: 'A list of [vector store](/docs/api-reference/vector-stores/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_stores = client.beta.vector_stores.list()\nprint(vector_stores)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStores = await openai.beta.vectorStores.list();\n console.log(vectorStores);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n },\n {\n \"id\": \"vs_abc456\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ v2\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n }\n ],\n \"first_id\": \"vs_abc123\",\n \"last_id\": \"vs_abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Vector Stores - summary: Create a vector store. - operationId: createVectorStore - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreRequest' - required: true - responses: - '200': - description: OK + description: Usage data retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreObject' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: Create vector store - group: vector_stores - beta: true - returns: 'A [vector store](/docs/api-reference/vector-stores/object) object.' + name: Audio speeches + group: usage-audio-speeches + returns: 'A list of paginated, time bucketed [Audio speeches usage](/docs/api-reference/usage/audio_speeches_object) objects.' 
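The usage endpoints in this block all take a Unix-second time range plus an optional bucket width. As a hedged sketch (the `requests` library and the one-week window are illustrative choices, not part of the spec), fetching daily audio-speech buckets might look like this:

```python
# Minimal sketch (not part of the spec): one week of daily audio speeches usage.
import os
import time
import requests

resp = requests.get(
    "https://api.openai.com/v1/organization/usage/audio_speeches",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
    params={
        "start_time": int(time.time()) - 7 * 86400,  # inclusive, Unix seconds
        "bucket_width": "1d",                        # 1m, 1h or 1d
        "limit": 7,                                  # number of buckets to return
    },
)
for bucket in resp.json()["data"]:
    for result in bucket["results"]:
        print(bucket["start_time"], result["characters"], result["num_model_requests"])
```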
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.create(\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.create({\n name: \"Support FAQ\"\n });\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" - '/vector_stores/{vector_store_id}': + curl: "curl \"https://api.openai.com/v1/organization/usage/audio_speeches?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"orgainzation.usage.audio_speeches.result\",\n \"characters\": 45,\n \"num_model_requests\": 1,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null\n }\n ]\n }\n ],\n \"has_more\": false,\n \"next_page\": null\n}\n" + /organization/usage/audio_transcriptions: get: tags: - - Vector Stores - summary: Retrieves a vector store. - operationId: getVectorStore + - Usage + summary: Get audio transcriptions usage details for the organization. + operationId: usage-audio-transcriptions parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to retrieve. + - name: start_time + in: query + description: 'Start time (Unix seconds) of the query time range, inclusive.' required: true + schema: + type: integer + - name: end_time + in: query + description: 'End time (Unix seconds) of the query time range, exclusive.' + schema: + type: integer + - name: bucket_width + in: query + description: 'Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, default to `1d`.' + schema: + enum: + - 1m + - 1h + - 1d + type: string + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + schema: + type: array + items: + type: string + - name: group_by + in: query + description: 'Group the usage data by the specified fields. Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.' 
+ schema: + type: array + items: + enum: + - project_id + - user_id + - api_key_id + - model + type: string + - name: limit + in: query + description: "Specifies the number of buckets to return.\n- `bucket_width=1d`: default: 7, max: 31\n- `bucket_width=1h`: default: 24, max: 168\n- `bucket_width=1m`: default: 60, max: 1440\n" + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. schema: type: string responses: '200': - description: OK + description: Usage data retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreObject' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: Retrieve vector store - group: vector_stores - beta: true - returns: 'The [vector store](/docs/api-reference/vector-stores/object) object matching the specified ID.' + name: Audio transcriptions + group: usage-audio-transcriptions + returns: 'A list of paginated, time bucketed [Audio transcriptions usage](/docs/api-reference/usage/audio_transcriptions_object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.retrieve(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.retrieve(\n \"vs_abc123\"\n );\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776\n}\n" - post: + curl: "curl \"https://api.openai.com/v1/organization/usage/audio_transcriptions?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"orgainzation.usage.audio_transcriptions.result\",\n \"seconds\": 20,\n \"num_model_requests\": 1,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null\n }\n ]\n }\n ],\n \"has_more\": false,\n \"next_page\": null\n}\n" + /organization/usage/code_interpreter_sessions: + get: tags: - - Vector Stores - summary: Modifies a vector store. - operationId: modifyVectorStore + - Usage + summary: Get code interpreter sessions usage details for the organization. + operationId: usage-code-interpreter-sessions parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to modify. + - name: start_time + in: query + description: 'Start time (Unix seconds) of the query time range, inclusive.' required: true + schema: + type: integer + - name: end_time + in: query + description: 'End time (Unix seconds) of the query time range, exclusive.' + schema: + type: integer + - name: bucket_width + in: query + description: 'Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, default to `1d`.' + schema: + enum: + - 1m + - 1h + - 1d + type: string + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. 
+ schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the usage data by the specified fields. Support fields include `project_id`. + schema: + type: array + items: + enum: + - project_id + type: string + - name: limit + in: query + description: "Specifies the number of buckets to return.\n- `bucket_width=1d`: default: 7, max: 31\n- `bucket_width=1h`: default: 24, max: 168\n- `bucket_width=1m`: default: 60, max: 1440\n" + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/UpdateVectorStoreRequest' - required: true responses: '200': - description: OK + description: Usage data retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreObject' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: Modify vector store - group: vector_stores - beta: true - returns: 'The modified [vector store](/docs/api-reference/vector-stores/object) object.' + name: Code interpreter sessions + group: usage-code-interpreter-sessions + returns: 'A list of paginated, time bucketed [Code interpreter sessions usage](/docs/api-reference/usage/code_interpreter_sessions_object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.update(\n vector_store_id=\"vs_abc123\",\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.update(\n \"vs_abc123\",\n {\n name: \"Support FAQ\"\n }\n );\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" - delete: + curl: "curl \"https://api.openai.com/v1/organization/usage/code_interpreter_sessions?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"orgainzation.usage.code_interpreter_sessions.result\",\n \"sessions\": 1,\n \"project_id\": null\n }\n ]\n }\n ],\n \"has_more\": false,\n \"next_page\": null\n}\n" + /organization/usage/completions: + get: tags: - - Vector Stores - summary: Delete a vector store. - operationId: deleteVectorStore + - Usage + summary: Get completions usage details for the organization. + operationId: usage-completions parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to delete. + - name: start_time + in: query + description: 'Start time (Unix seconds) of the query time range, inclusive.' required: true + schema: + type: integer + - name: end_time + in: query + description: 'End time (Unix seconds) of the query time range, exclusive.' 
+ schema: + type: integer + - name: bucket_width + in: query + description: 'Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, defaults to `1d`.' + schema: + enum: + - 1m + - 1h + - 1d + type: string + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + schema: + type: array + items: + type: string + - name: batch + in: query + description: "If `true`, return batch jobs only. If `false`, return non-batch jobs only. By default, return both.\n" + schema: + type: boolean + - name: group_by + in: query + description: 'Group the usage data by the specified fields. Supported fields include `project_id`, `user_id`, `api_key_id`, `model`, `batch` or any combination of them.' + schema: + type: array + items: + enum: + - project_id + - user_id + - api_key_id + - model + - batch + type: string + - name: limit + in: query + description: "Specifies the number of buckets to return.\n- `bucket_width=1d`: default: 7, max: 31\n- `bucket_width=1h`: default: 24, max: 168\n- `bucket_width=1m`: default: 60, max: 1440\n" + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponds to the `next_page` field from the previous response. schema: type: string responses: '200': - description: OK + description: Usage data retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/DeleteVectorStoreResponse' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: Delete vector store - group: vector_stores - beta: true - returns: Deletion status + name: Completions + group: usage-completions + returns: 'A list of paginated, time-bucketed [Completions usage](/docs/api-reference/usage/completions_object) objects.'
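+        # A minimal paging sketch for this endpoint, assuming the `requests` library and the OPENAI_ADMIN_KEY variable used in the curl examples (neither is part of this spec); it follows the `next_page` cursor via the `page` parameter until `has_more` is false.
+        #   import os, requests
+        #   url = "https://api.openai.com/v1/organization/usage/completions"
+        #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
+        #   params = {"start_time": 1730419200, "limit": 7}
+        #   buckets = []
+        #   while True:
+        #       page = requests.get(url, headers=headers, params=params).json()
+        #       buckets.extend(page["data"])  # each item is a time bucket with "results"
+        #       if not page.get("has_more"):
+        #           break
+        #       params["page"] = page["next_page"]  # cursor from the previous response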
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store = client.beta.vector_stores.delete(\n vector_store_id=\"vs_abc123\"\n)\nprint(deleted_vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStore = await openai.beta.vectorStores.del(\n \"vs_abc123\"\n );\n console.log(deletedVectorStore);\n}\n\nmain();\n" - response: "{\n id: \"vs_abc123\",\n object: \"vector_store.deleted\",\n deleted: true\n}\n" - '/vector_stores/{vector_store_id}/files': + curl: "curl \"https://api.openai.com/v1/organization/usage/completions?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"orgainzation.usage.completions.result\",\n \"input_tokens\": 1000,\n \"output_tokens\": 500,\n \"input_cached_tokens\": 800,\n \"num_model_requests\": 5,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null,\n \"batch\": null\n }\n ]\n }\n ],\n \"has_more\": true,\n \"next_page\": \"AAAAAGdGxdEiJdKOAAAAAGcqsYA=\"\n}\n" + /organization/usage/embeddings: get: tags: - - Vector Stores - summary: Returns a list of vector store files. - operationId: listVectorStoreFiles + - Usage + summary: Get embeddings usage details for the organization. + operationId: usage-embeddings parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the files belong to. + - name: start_time + in: query + description: 'Start time (Unix seconds) of the query time range, inclusive.' required: true schema: - type: string - - name: limit + type: integer + - name: end_time in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + description: 'End time (Unix seconds) of the query time range, exclusive.' schema: type: integer - default: 20 - - name: order + - name: bucket_width in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + description: 'Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, default to `1d`.' schema: enum: - - asc - - desc + - 1m + - 1h + - 1d type: string - default: desc - - name: after + default: 1d + - name: project_ids in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + description: Return only usage for these projects. schema: - type: string - - name: before + type: array + items: + type: string + - name: user_ids in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + description: Return only usage for these users. schema: - type: string - - name: filter + type: array + items: + type: string + - name: api_key_ids in: query - description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' + description: Return only usage for these API keys. + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + schema: + type: array + items: + type: string + - name: group_by + in: query + description: 'Group the usage data by the specified fields. Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.' + schema: + type: array + items: + enum: + - project_id + - user_id + - api_key_id + - model + type: string + - name: limit + in: query + description: "Specifies the number of buckets to return.\n- `bucket_width=1d`: default: 7, max: 31\n- `bucket_width=1h`: default: 24, max: 168\n- `bucket_width=1m`: default: 60, max: 1440\n" + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. schema: - enum: - - in_progress - - completed - - failed - - cancelled type: string responses: '200': - description: OK + description: Usage data retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/ListVectorStoreFilesResponse' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: List vector store files - group: vector_stores - beta: true - returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' + name: Embeddings + group: usage-embeddings + returns: 'A list of paginated, time bucketed [Embeddings usage](/docs/api-reference/usage/embeddings_object) objects.' 
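+        # A short sketch of requesting hourly buckets from this endpoint, assuming the `requests` library and the OPENAI_ADMIN_KEY variable from the curl examples (both are assumptions, not part of this spec); with `bucket_width=1h` the `limit` parameter defaults to 24 and can go up to 168.
+        #   import os, requests
+        #   resp = requests.get(
+        #       "https://api.openai.com/v1/organization/usage/embeddings",
+        #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+        #       params={"start_time": 1730419200, "bucket_width": "1h", "limit": 24},
+        #   )
+        #   for bucket in resp.json()["data"]:
+        #       print(bucket["start_time"], bucket["results"])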
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.files.list(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store_files)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.files.list(\n \"vs_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" - post: + curl: "curl \"https://api.openai.com/v1/organization/usage/embeddings?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"orgainzation.usage.embeddings.result\",\n \"input_tokens\": 16,\n \"num_model_requests\": 2,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null\n }\n ]\n }\n ],\n \"has_more\": false,\n \"next_page\": null\n}\n" + /organization/usage/images: + get: tags: - - Vector Stores - summary: 'Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).' - operationId: createVectorStoreFile + - Usage + summary: Get images usage details for the organization. + operationId: usage-images parameters: - - name: vector_store_id - in: path - description: "The ID of the vector store for which to create a File.\n" + - name: start_time + in: query + description: 'Start time (Unix seconds) of the query time range, inclusive.' required: true + schema: + type: integer + - name: end_time + in: query + description: 'End time (Unix seconds) of the query time range, exclusive.' + schema: + type: integer + - name: bucket_width + in: query + description: 'Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, default to `1d`.' + schema: + enum: + - 1m + - 1h + - 1d + type: string + default: 1d + - name: sources + in: query + description: 'Return only usages for these sources. Possible values are `image.generation`, `image.edit`, `image.variation` or any combination of them.' + schema: + type: array + items: + enum: + - image.generation + - image.edit + - image.variation + type: string + - name: sizes + in: query + description: 'Return only usages for these image sizes. Possible values are `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any combination of them.' + schema: + type: array + items: + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 1792x1792 + - 1024x1792 + type: string + - name: project_ids + in: query + description: Return only usage for these projects. + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. 
+ schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + schema: + type: array + items: + type: string + - name: group_by + in: query + description: 'Group the usage data by the specified fields. Support fields include `project_id`, `user_id`, `api_key_id`, `model`, `size`, `source` or any combination of them.' + schema: + type: array + items: + enum: + - project_id + - user_id + - api_key_id + - model + - size + - source + type: string + - name: limit + in: query + description: "Specifies the number of buckets to return.\n- `bucket_width=1d`: default: 7, max: 31\n- `bucket_width=1h`: default: 24, max: 168\n- `bucket_width=1m`: default: 60, max: 1440\n" + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. schema: type: string - example: vs_abc123 - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreFileRequest' - required: true responses: '200': - description: OK + description: Usage data retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileObject' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: Create vector store file - group: vector_stores - beta: true - returns: 'A [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' + name: Images + group: usage-images + returns: 'A list of paginated, time bucketed [Images usage](/docs/api-reference/usage/images_object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_id\": \"file-abc123\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.create(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFile = await openai.beta.vectorStores.files.create(\n \"vs_abc123\",\n {\n file_id: \"file-abc123\"\n }\n );\n console.log(myVectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"usage_bytes\": 1234,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" - '/vector_stores/{vector_store_id}/files/{file_id}': + curl: "curl \"https://api.openai.com/v1/organization/usage/images?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"orgainzation.usage.images.result\",\n \"images\": 2,\n \"num_model_requests\": 2,\n \"size\": null,\n \"source\": null,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null\n }\n ]\n }\n ],\n \"has_more\": false,\n \"next_page\": null\n}\n" + /organization/usage/moderations: get: tags: - - Vector Stores - summary: Retrieves a vector store 
file. - operationId: getVectorStoreFile + - Usage + summary: Get moderations usage details for the organization. + operationId: usage-moderations parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file belongs to. + - name: start_time + in: query + description: 'Start time (Unix seconds) of the query time range, inclusive.' required: true schema: + type: integer + - name: end_time + in: query + description: 'End time (Unix seconds) of the query time range, exclusive.' + schema: + type: integer + - name: bucket_width + in: query + description: 'Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, default to `1d`.' + schema: + enum: + - 1m + - 1h + - 1d type: string - example: vs_abc123 - - name: file_id - in: path - description: The ID of the file being retrieved. - required: true + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + schema: + type: array + items: + type: string + - name: group_by + in: query + description: 'Group the usage data by the specified fields. Support fields include `project_id`, `user_id`, `api_key_id`, `model` or any combination of them.' + schema: + type: array + items: + enum: + - project_id + - user_id + - api_key_id + - model + type: string + - name: limit + in: query + description: "Specifies the number of buckets to return.\n- `bucket_width=1d`: default: 7, max: 31\n- `bucket_width=1h`: default: 24, max: 168\n- `bucket_width=1m`: default: 60, max: 1440\n" + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. schema: type: string - example: file-abc123 responses: '200': - description: OK + description: Usage data retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileObject' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: Retrieve vector store file - group: vector_stores - beta: true - returns: 'The [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' + name: Moderations + group: usage-moderations + returns: 'A list of paginated, time bucketed [Moderations usage](/docs/api-reference/usage/moderations_object) objects.' 
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.retrieve(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFile = await openai.beta.vectorStores.files.retrieve(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(vectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" - delete: + curl: "curl \"https://api.openai.com/v1/organization/usage/moderations?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"organization.usage.moderations.result\",\n \"input_tokens\": 16,\n \"num_model_requests\": 2,\n \"project_id\": null,\n \"user_id\": null,\n \"api_key_id\": null,\n \"model\": null\n }\n ]\n }\n ],\n \"has_more\": false,\n \"next_page\": null\n}\n" + /organization/usage/vector_stores: + get: tags: - - Vector Stores - summary: 'Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.' - operationId: deleteVectorStoreFile + - Usage + summary: Get vector stores usage details for the organization. + operationId: usage-vector-stores parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file belongs to. + - name: start_time + in: query + description: 'Start time (Unix seconds) of the query time range, inclusive.' required: true schema: + type: integer + - name: end_time + in: query + description: 'End time (Unix seconds) of the query time range, exclusive.' + schema: + type: integer + - name: bucket_width + in: query + description: 'Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, defaults to `1d`.' + schema: + enum: + - 1m + - 1h + - 1d type: string - - name: file_id - in: path - description: The ID of the file to delete. - required: true + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the usage data by the specified fields. Supported fields include `project_id`. + schema: + type: array + items: + enum: + - project_id + type: string + - name: limit + in: query + description: "Specifies the number of buckets to return.\n- `bucket_width=1d`: default: 7, max: 31\n- `bucket_width=1h`: default: 24, max: 168\n- `bucket_width=1m`: default: 60, max: 1440\n" + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponds to the `next_page` field from the previous response. schema: type: string responses: '200': - description: OK + description: Usage data retrieved successfully.
content: application/json: schema: - $ref: '#/components/schemas/DeleteVectorStoreFileResponse' + $ref: '#/components/schemas/UsageResponse' x-oaiMeta: - name: Delete vector store file - group: vector_stores - beta: true - returns: Deletion status + name: Vector stores + group: usage-vector-stores + returns: 'A list of paginated, time bucketed [Vector stores usage](/docs/api-reference/usage/vector_stores_object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file = client.beta.vector_stores.files.delete(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(deleted_vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFile = await openai.beta.vectorStores.files.del(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(deletedVectorStoreFile);\n}\n\nmain();\n" - response: "{\n id: \"file-abc123\",\n object: \"vector_store.file.deleted\",\n deleted: true\n}\n" - '/vector_stores/{vector_store_id}/file_batches': - post: + curl: "curl \"https://api.openai.com/v1/organization/usage/vector_stores?start_time=1730419200&limit=1\" \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" + response: "{\n \"object\": \"page\",\n \"data\": [\n {\n \"object\": \"bucket\",\n \"start_time\": 1730419200,\n \"end_time\": 1730505600,\n \"results\": [\n {\n \"object\": \"orgainzation.usage.vector_stores.result\",\n \"usage_bytes\": 1024,\n \"project_id\": null\n }\n ]\n }\n ],\n \"has_more\": false,\n \"next_page\": null\n}\n" + /organization/users: + get: tags: - - Vector Stores - summary: Create a vector store file batch. - operationId: createVectorStoreFileBatch + - Users + summary: Lists all of the users in the organization. + operationId: list-users parameters: - - name: vector_store_id - in: path - description: "The ID of the vector store for which to create a File Batch.\n" - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string - example: vs_abc123 - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreFileBatchRequest' - required: true responses: '200': - description: OK + description: Users listed successfully. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' + $ref: '#/components/schemas/UserListResponse' x-oaiMeta: - name: Create vector store file batch - group: vector_stores - beta: true - returns: 'A [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' + name: List users + group: administration + returns: 'A list of [User](/docs/api-reference/users/object) objects.' 
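+        # A minimal sketch of walking the full user list with the `after` cursor, assuming the `requests` library and the OPENAI_ADMIN_KEY variable from the curl examples (assumptions, not part of this spec); each page's `last_id` seeds the next request while `has_more` is true.
+        #   import os, requests
+        #   url = "https://api.openai.com/v1/organization/users"
+        #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
+        #   params = {"limit": 20}
+        #   while True:
+        #       page = requests.get(url, headers=headers, params=params).json()
+        #       for user in page["data"]:
+        #           print(user["id"], user["role"])
+        #       if not page["has_more"]:
+        #           break
+        #       params["after"] = page["last_id"]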
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_ids\": [\"file-abc123\", \"file-abc456\"]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.create(\n vector_store_id=\"vs_abc123\",\n file_ids=[\"file-abc123\", \"file-abc456\"]\n)\nprint(vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(\n \"vs_abc123\",\n {\n file_ids: [\"file-abc123\", \"file-abc456\"]\n }\n );\n console.log(myVectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}': + curl: "curl \"https://api.openai.com/v1/organization/users?after=user_abc&limit=20\" \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" + '/organization/users/{user_id}': get: tags: - - Vector Stores - summary: Retrieves a vector store file batch. - operationId: getVectorStoreFileBatch + - Users + summary: Retrieves a user by their identifier. + operationId: retrieve-user parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file batch belongs to. - required: true - schema: - type: string - example: vs_abc123 - - name: batch_id + - name: user_id in: path - description: The ID of the file batch being retrieved. + description: The ID of the user. required: true schema: type: string - example: vsfb_abc123 responses: '200': - description: OK + description: User retrieved successfully. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' + $ref: '#/components/schemas/User' x-oaiMeta: - name: Retrieve vector store file batch - group: vector_stores - beta: true - returns: 'The [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' + name: Retrieve user + group: administration + returns: 'The [User](/docs/api-reference/users/object) object matching the specified ID.'
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.retrieve(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel': + curl: "curl https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" post: tags: - - Vector Stores - summary: Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. - operationId: cancelVectorStoreFileBatch + - Users + summary: Modifies a user's role in the organization. + operationId: modify-user parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file batch belongs to. - required: true - schema: - type: string - - name: batch_id + - name: user_id in: path - description: The ID of the file batch to cancel. + description: The ID of the user. required: true schema: type: string + requestBody: + description: The new user role to modify. This must be one of `owner` or `member`. + content: + application/json: + schema: + $ref: '#/components/schemas/UserRoleUpdateRequest' + required: true responses: '200': - description: OK + description: User role updated successfully. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' + $ref: '#/components/schemas/User' x-oaiMeta: - name: Cancel vector store file batch - group: vector_stores - beta: true - returns: The modified vector store file batch object. + name: Modify user + group: administration + returns: 'The updated [User](/docs/api-reference/users/object) object.' 
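+        # A minimal sketch of the role update shown in the curl example below, assuming the `requests` library and the OPENAI_ADMIN_KEY variable (assumptions, not part of this spec); the body's `role` must be `owner` or `member`, per the request schema above.
+        #   import os, requests
+        #   resp = requests.post(
+        #       "https://api.openai.com/v1/organization/users/user_abc",
+        #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+        #       json={"role": "member"},
+        #   )
+        #   print(resp.json())  # the updated organization.user object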
examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel(\n vector_store_id=\"vs_abc123\",\n file_batch_id=\"vsfb_abc123\"\n)\nprint(deleted_vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(deletedVectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"cancelling\",\n \"file_counts\": {\n \"in_progress\": 12,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 15,\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}/files': - get: + curl: "curl -X POST https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" + response: + content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + delete: tags: - - Vector Stores - summary: Returns a list of vector store files in a batch. - operationId: listFilesInVectorStoreBatch + - Users + summary: Deletes a user from the organization. + operationId: delete-user parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the files belong to. - required: true - schema: - type: string - - name: batch_id + - name: user_id in: path - description: The ID of the file batch that the files belong to. + description: The ID of the user. required: true schema: type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: filter - in: query - description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' 
- schema: - enum: - - in_progress - - completed - - failed - - cancelled - type: string responses: '200': - description: OK + description: User deleted successfully. content: application/json: schema: - $ref: '#/components/schemas/ListVectorStoreFilesResponse' + $ref: '#/components/schemas/UserDeleteResponse' x-oaiMeta: - name: List vector store files in a batch - group: vector_stores - beta: true - returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' + name: Delete user + group: administration + returns: Confirmation of the deleted user examples: request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.file_batches.list_files(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_files)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" - /batches: + curl: "curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" + response: + content: "{\n \"object\": \"organization.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" + /threads: post: tags: - - Batch - summary: Creates and executes a batch from an uploaded file of requests - operationId: createBatch + - Assistants + summary: Create a thread. + operationId: createThread requestBody: content: application/json: schema: - required: - - input_file_id - - endpoint - - completion_window - type: object - properties: - input_file_id: - type: string - description: "The ID of an uploaded file that contains requests for the new batch.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size.\n" - endpoint: - enum: - - /v1/chat/completions - - /v1/embeddings - - /v1/completions - type: string - description: 'The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch.' - completion_window: - enum: - - 24h - type: string - description: The time frame within which the batch should be processed. Currently only `24h` is supported. - metadata: - type: object - additionalProperties: - type: string - description: Optional custom metadata for the batch. 
- nullable: true - required: true + $ref: '#/components/schemas/CreateThreadRequest' responses: '200': - description: Batch created successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Batch' + $ref: '#/components/schemas/ThreadObject' x-oaiMeta: - name: Create batch - group: batch - returns: 'The created [Batch](/docs/api-reference/batch/object) object.' + name: Create thread + group: threads + beta: true + returns: 'A [thread](/docs/api-reference/threads) object.' examples: - request: - curl: "curl https://api.openai.com/v1/batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input_file_id\": \"file-abc123\",\n \"endpoint\": \"/v1/chat/completions\",\n \"completion_window\": \"24h\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.create(\n input_file_id=\"file-abc123\",\n endpoint=\"/v1/chat/completions\",\n completion_window=\"24h\"\n)\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.create({\n input_file_id: \"file-abc123\",\n endpoint: \"/v1/chat/completions\",\n completion_window: \"24h\"\n });\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"validating\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": null,\n \"expires_at\": null,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 0,\n \"completed\": 0,\n \"failed\": 0\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - get: + - title: Empty + request: + curl: "curl https://api.openai.com/v1/threads \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d ''\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nempty_thread = client.beta.threads.create()\nprint(empty_thread)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const emptyThread = await openai.beta.threads.create();\n\n console.log(emptyThread);\n}\n\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699012949,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" + - title: Messages + request: + curl: "curl https://api.openai.com/v1/threads \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-H \"OpenAI-Beta: assistants=v2\" \\\n-d '{\n \"messages\": [{\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n }, {\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage_thread = client.beta.threads.create(\n messages=[\n {\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n },\n {\n \"role\": \"user\",\n \"content\": \"How does AI work? 
Explain it in simple terms.\"\n },\n ]\n)\n\nprint(message_thread)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messageThread = await openai.beta.threads.create({\n messages: [\n {\n role: \"user\",\n content: \"Hello, what is AI?\"\n },\n {\n role: \"user\",\n content: \"How does AI work? Explain it in simple terms.\",\n },\n ],\n });\n\n console.log(messageThread);\n}\n\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" + /threads/runs: + post: tags: - - Batch - summary: List your organization's batches. - operationId: listBatches - parameters: - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 + - Assistants + summary: Create a thread and run it in one request. + operationId: createThreadAndRun + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateThreadAndRunRequest' + required: true responses: '200': - description: Batch listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ListBatchesResponse' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: List batch - group: batch - returns: 'A list of paginated [Batch](/docs/api-reference/batch/object) objects.' + name: Create thread and run + group: threads + beta: true + returns: 'A [run](/docs/api-reference/runs/object) object.' examples: - request: - curl: "curl https://api.openai.com/v1/batches?limit=2 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.list()\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.batches.list();\n\n for await (const batch of list) {\n console.log(batch);\n }\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly job\",\n }\n },\n { ... 
},\n ],\n \"first_id\": \"batch_abc123\",\n \"last_id\": \"batch_abc456\",\n \"has_more\": true\n}\n" - '/batches/{batch_id}': + - title: Default + request: + curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.create_and_run(\n assistant_id=\"asst_abc123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_abc123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Explain deep learning to a 5 year old.\" },\n ],\n },\n });\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076792,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": null,\n \"expires_at\": 1699077392,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"required_action\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant.\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_completion_tokens\": null,\n \"max_prompt_tokens\": null,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"incomplete_details\": null,\n \"usage\": null,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.create_and_run(\n assistant_id=\"asst_123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Hello\" },\n ],\n },\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710348075,\"metadata\":{}}\n\nevent: thread.run.created\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! How can I assist you today?\",\"annotations\":[]}}], \"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\n{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1713226836,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1713226837,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: done\ndata: [DONE]\n" + - title: Streaming with Functions + request: + curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.create_and_run(\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"What is the weather like in San Francisco?\" },\n ],\n },\n tools: tools,\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710351818,\"metadata\":{}}\n\nevent: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"\",\"output\":null}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"{\\\"\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"location\"}}]}}}\n\n...\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"ahrenheit\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"\\\"}\"}}]}}}\n\nevent: thread.run.requires_action\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"requires_action\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":{\"type\":\"submit_tool_outputs\",\"submit_tool_outputs\":{\"tool_calls\":[{\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\"}}]}},\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given 
location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" + '/threads/{thread_id}': get: tags: - - Batch - summary: Retrieves a batch. - operationId: retrieveBatch + - Assistants + summary: Retrieves a thread. + operationId: getThread parameters: - - name: batch_id + - name: thread_id in: path - description: The ID of the batch to retrieve. + description: The ID of the thread to retrieve. required: true schema: type: string responses: '200': - description: Batch retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Batch' + $ref: '#/components/schemas/ThreadObject' x-oaiMeta: - name: Retrieve batch - group: batch - returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' + name: Retrieve thread + group: threads + beta: true + returns: 'The [thread](/docs/api-reference/threads/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/batches/batch_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.retrieve(\"batch_abc123\")\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.retrieve(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - '/batches/{batch_id}/cancel': + curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_thread = client.beta.threads.retrieve(\"thread_abc123\")\nprint(my_thread)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myThread = await openai.beta.threads.retrieve(\n \"thread_abc123\"\n );\n\n console.log(myThread);\n}\n\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": []\n 
}\n }\n}\n" post: tags: - - Batch - summary: 'Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file.' - operationId: cancelBatch + - Assistants + summary: Modifies a thread. + operationId: modifyThread parameters: - - name: batch_id + - name: thread_id in: path - description: The ID of the batch to cancel. + description: The ID of the thread to modify. Only the `metadata` can be modified. required: true schema: type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ModifyThreadRequest' + required: true responses: '200': - description: Batch is cancelling. Returns the cancelling batch's details. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Batch' + $ref: '#/components/schemas/ThreadObject' x-oaiMeta: - name: Cancel batch - group: batch - returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' + name: Modify thread + group: threads + beta: true + returns: 'The modified [thread](/docs/api-reference/threads/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/batches/batch_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.cancel(\"batch_abc123\")\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.cancel(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"cancelling\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": 1711475133,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 23,\n \"failed\": 1\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - /organization/audit_logs: - get: + curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_thread = client.beta.threads.update(\n \"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n)\nprint(my_updated_thread)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const updatedThread = await openai.beta.threads.update(\n \"thread_abc123\",\n {\n metadata: { modified: \"true\", user: \"abc123\" },\n }\n );\n\n console.log(updatedThread);\n}\n\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n },\n \"tool_resources\": {}\n}\n" + delete: tags: - - Audit Logs - summary: List user 
actions and configuration changes within this organization. - operationId: list-audit-logs + - Assistants + summary: Delete a thread. + operationId: deleteThread parameters: - - name: effective_at - in: query - description: Return only events whose `effective_at` (Unix seconds) is in this range. - schema: - type: object - properties: - gt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than this value. - gte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. - lt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than this value. - lte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. - - name: 'project_ids[]' - in: query - description: Return only events for these projects. - schema: - type: array - items: - type: string - - name: 'event_types[]' - in: query - description: 'Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object).' - schema: - type: array - items: - $ref: '#/components/schemas/AuditLogEventType' - - name: 'actor_ids[]' - in: query - description: 'Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID.' - schema: - type: array - items: - type: string - - name: 'actor_emails[]' - in: query - description: Return only events performed by users with these emails. - schema: - type: array - items: - type: string - - name: 'resource_ids[]' - in: query - description: 'Return only events performed on these targets. For example, a project ID updated.' - schema: - type: array - items: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + - name: thread_id + in: path + description: The ID of the thread to delete. + required: true schema: type: string responses: '200': - description: Audit logs listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ListAuditLogsResponse' + $ref: '#/components/schemas/DeleteThreadResponse' x-oaiMeta: - name: List audit logs - group: audit-logs - returns: 'A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects.' 
+ name: Delete thread + group: threads + beta: true + returns: Deletion status examples: request: - curl: "curl https://api.openai.com/v1/organization/audit_logs \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\" \\\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"audit_log-xxx_yyyymmdd\",\n \"type\": \"project.archived\",\n \"effective_at\": 1722461446,\n \"actor\": {\n \"type\": \"api_key\",\n \"api_key\": {\n \"type\": \"user\",\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n }\n }\n },\n \"project.archived\": {\n \"id\": \"proj_abc\"\n },\n },\n {\n \"id\": \"audit_log-yyy__20240101\",\n \"type\": \"api_key.updated\",\n \"effective_at\": 1720804190,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.updated\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource_2.operation_2\"]\n }\n },\n }\n ],\n \"first_id\": \"audit_log-xxx__20240101\",\n \"last_id\": \"audit_log_yyy__20240101\",\n \"has_more\": true\n}\n" - /organization/invites: + curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.threads.delete(\"thread_abc123\")\nprint(response)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.threads.del(\"thread_abc123\");\n\n console.log(response);\n}\nmain();" + response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread.deleted\",\n \"deleted\": true\n}\n" + '/threads/{thread_id}/messages': get: tags: - - Invites - summary: Returns a list of invites in the organization. - operationId: list-invites + - Assistants + summary: Returns a list of messages for a given thread. + operationId: listMessages parameters: + - name: thread_id + in: path + description: 'The ID of the [thread](/docs/api-reference/threads) the messages belong to.' + required: true + schema: + type: string - name: limit in: query description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" schema: type: integer default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + schema: + type: string + - name: run_id + in: query + description: "Filter messages by the run ID that generated them.\n" + schema: + type: string responses: '200': - description: Invites listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/InviteListResponse' + $ref: '#/components/schemas/ListMessagesResponse' x-oaiMeta: - name: List invites - group: administration - returns: 'A list of [Invite](/docs/api-reference/invite/object) objects.' + name: List messages + group: threads + beta: true + returns: 'A list of [message](/docs/api-reference/messages) objects.' examples: request: - curl: "curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n }\n ],\n \"first_id\": \"invite-abc\",\n \"last_id\": \"invite-abc\",\n \"has_more\": false\n}\n" + curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_messages = client.beta.threads.messages.list(\"thread_abc123\")\nprint(thread_messages.data)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.list(\n \"thread_abc123\"\n );\n\n console.log(threadMessages.data);\n}\n\nmain();" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n },\n {\n \"id\": \"msg_abc456\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hello, what is AI?\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n }\n ],\n \"first_id\": \"msg_abc123\",\n \"last_id\": \"msg_abc456\",\n \"has_more\": false\n}\n" post: tags: - - Invites - summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. - operationId: inviteUser - requestBody: - description: The invite request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/InviteRequest' - required: true - responses: - '200': - description: User invited successfully. 
- content: - application/json: - schema: - $ref: '#/components/schemas/Invite' - x-oaiMeta: - name: Create invite - group: administration - returns: 'The created [Invite](/docs/api-reference/invite/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/invites \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"email\": \"user@example.com\",\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": null\n}\n" - '/organization/invites/{invite_id}': - get: - tags: - - Invites - summary: Retrieves an invite. - operationId: retrieve-invite + - Assistants + summary: Create a message. + operationId: createMessage parameters: - - name: invite_id + - name: thread_id in: path - description: The ID of the invite to retrieve. + description: 'The ID of the [thread](/docs/api-reference/threads) to create a message for.' required: true schema: type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateMessageRequest' + required: true responses: '200': - description: Invite retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Invite' + $ref: '#/components/schemas/MessageObject' x-oaiMeta: - name: Retrieve invite - group: administration - returns: 'The [Invite](/docs/api-reference/invite/object) object matching the specified ID.' + name: Create message + group: threads + beta: true + returns: 'A [message](/docs/api-reference/messages/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" - delete: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_message = client.beta.threads.messages.create(\n \"thread_abc123\",\n role=\"user\",\n content=\"How does AI work? Explain it in simple terms.\",\n)\nprint(thread_message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.create(\n \"thread_abc123\",\n { role: \"user\", content: \"How does AI work? Explain it in simple terms.\" }\n );\n\n console.log(threadMessages);\n}\n\nmain();" + response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1713226573,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? 
Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" + '/threads/{thread_id}/messages/{message_id}': + get: tags: - - Invites - summary: 'Delete an invite. If the invite has already been accepted, it cannot be deleted.' - operationId: delete-invite + - Assistants + summary: Retrieve a message. + operationId: getMessage parameters: - - name: invite_id + - name: thread_id in: path - description: The ID of the invite to delete. + description: 'The ID of the [thread](/docs/api-reference/threads) to which this message belongs.' required: true schema: type: string - responses: - '200': - description: Invite deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/InviteDeleteResponse' - x-oaiMeta: - name: Delete invite - group: administration - returns: Confirmation that the invite has been deleted - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.invite.deleted\",\n \"id\": \"invite-abc\",\n \"deleted\": true\n} \n" - /organization/users: - get: - tags: - - Users - summary: Lists all of the users in the organization. - operationId: list-users - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + - name: message_id + in: path + description: The ID of the message to retrieve. + required: true schema: type: string responses: '200': - description: Users listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/UserListResponse' + $ref: '#/components/schemas/MessageObject' x-oaiMeta: - name: List users - group: administration - returns: 'A list of [User](/docs/api-reference/users/object) objects.' + name: Retrieve message + group: threads + beta: true + returns: 'The [message](/docs/api-reference/messages/object) object matching the specified ID.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" - '/organization/users/{user_id}': - get: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.retrieve(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n)\nprint(message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.retrieve(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(message);\n}\n\nmain();" + response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" + post: tags: - - Users - summary: Retrieves a user by their identifier. - operationId: retrieve-user + - Assistants + summary: Modifies a message. + operationId: modifyMessage parameters: - - name: user_id + - name: thread_id in: path - description: The ID of the user. + description: The ID of the thread to which this message belongs. + required: true + schema: + type: string + - name: message_id + in: path + description: The ID of the message to modify. required: true schema: type: string - responses: - '200': - description: User retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Retrieve user - group: administration - returns: 'The [User](/docs/api-reference/users/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - post: - tags: - - Users - summary: Modifies a user's role in the organization. - operationId: modify-user requestBody: - description: The new user role to modify. This must be one of `owner` or `member`. content: application/json: schema: - $ref: '#/components/schemas/UserRoleUpdateRequest' + $ref: '#/components/schemas/ModifyMessageRequest' required: true responses: '200': - description: User role updated successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/User' + $ref: '#/components/schemas/MessageObject' x-oaiMeta: - name: Modify user - group: administration - returns: 'The updated [User](/docs/api-reference/users/object) object.' 
+ name: Modify message + group: threads + beta: true + returns: 'The modified [message](/docs/api-reference/messages/object) object.' examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.update(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\",\n },\n)\nprint(message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.update(\n \"thread_abc123\",\n \"msg_abc123\",\n {\n metadata: {\n modified: \"true\",\n user: \"abc123\",\n },\n }\n );\n\n console.log(message);\n}\n\nmain();" + response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n}\n" delete: tags: - - Users - summary: Deletes a user from the organization. - operationId: delete-user + - Assistants + summary: Deletes a message. + operationId: deleteMessage parameters: - - name: user_id + - name: thread_id in: path - description: The ID of the user. + description: The ID of the thread to which this message belongs. + required: true + schema: + type: string + - name: message_id + in: path + description: The ID of the message to delete. required: true schema: type: string responses: '200': - description: User deleted successfully. 
+ description: OK content: application/json: schema: - $ref: '#/components/schemas/UserDeleteResponse' + $ref: '#/components/schemas/DeleteMessageResponse' x-oaiMeta: - name: Delete user - group: administration - returns: Confirmation of the deleted user + name: Delete message + group: threads + beta: true + returns: Deletion status examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n} \n" - /organization/projects: + curl: "curl -X DELETE https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_message = client.beta.threads.messages.delete(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n)\nprint(deleted_message)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedMessage = await openai.beta.threads.messages.del(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(deletedMessage);\n}\n\nmain();" + response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message.deleted\",\n \"deleted\": true\n}\n" '/threads/{thread_id}/runs': get: tags: - - Projects - summary: Returns a list of projects. - operationId: list-projects + - Assistants + summary: Returns a list of runs belonging to a thread. + operationId: listRuns parameters: + - name: thread_id + in: path + description: The ID of the thread the run belongs to. + required: true + schema: + type: string - name: limit in: query description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" schema: type: integer default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string - - name: include_archived + - name: before in: query - description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: - type: boolean - default: false + type: string responses: '200': - description: Projects listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectListResponse' + $ref: '#/components/schemas/ListRunsResponse' x-oaiMeta: - name: List projects - group: administration - returns: 'A list of [Project](/docs/api-reference/projects/object) objects.' 
+ name: List runs + group: threads + beta: true + returns: 'A list of [run](/docs/api-reference/runs/object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n }\n ],\n \"first_id\": \"proj-abc\",\n \"last_id\": \"proj-xyz\",\n \"has_more\": false\n}\n" + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nruns = client.beta.threads.runs.list(\n \"thread_abc123\"\n)\n\nprint(runs)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const runs = await openai.beta.threads.runs.list(\n \"thread_abc123\"\n );\n\n console.log(runs);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n },\n {\n \"id\": \"run_abc456\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n }\n ],\n \"first_id\": \"run_abc123\",\n \"last_id\": \"run_abc456\",\n \"has_more\": false\n}\n" post: tags: - - Projects - summary: 'Create a new project in the organization. 
Projects can be created and archived, but cannot be deleted.' - operationId: create-project + - Assistants + summary: Create a run. + operationId: createRun + parameters: + - name: thread_id + in: path + description: The ID of the thread to run. + required: true + schema: + type: string + - name: 'include[]' + in: query + description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" + schema: + type: array + items: + enum: + - 'step_details.tool_calls[*].file_search.results[*].content' + type: string requestBody: - description: The project create request payload. content: application/json: schema: - $ref: '#/components/schemas/ProjectCreateRequest' + $ref: '#/components/schemas/CreateRunRequest' required: true responses: '200': - description: Project created successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Project' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Create project - group: administration - returns: 'The created [Project](/docs/api-reference/projects/object) object.' + name: Create run + group: threads + beta: true + returns: 'A [run](/docs/api-reference/runs/object) object.' examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project ABC\"\n }'\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project ABC\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - '/organization/projects/{project_id}': + - title: Default + request: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\"\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n { assistant_id: \"asst_abc123\" }\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + - title: Streaming + request: + curl: "curl 
https://api.openai.com/v1/threads/thread_123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_123\",\n assistant_id=\"asst_123\",\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_123\",\n { assistant_id: \"asst_123\", stream: true }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710330641,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: 
{\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710330642,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710330642,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710330641,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710330642,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" + - title: Streaming with Functions + request: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n {\n assistant_id: \"asst_abc123\",\n tools: tools,\n stream: true\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710348075,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: 
thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710348075,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710348077,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" + '/threads/{thread_id}/runs/{run_id}': get: tags: - - Projects - summary: Retrieves a project. - operationId: retrieve-project + - Assistants + summary: Retrieves a run. + operationId: getRun parameters: - - name: project_id + - name: thread_id in: path - description: The ID of the project. + description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' 
+ required: true + schema: + type: string + - name: run_id + in: path + description: The ID of the run to retrieve. required: true schema: type: string responses: '200': - description: Project retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Project' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Retrieve project - group: administration - description: Retrieve a project. - returns: 'The [Project](/docs/api-reference/projects/object) object matching the specified ID.' + name: Retrieve run + group: threads + beta: true + returns: 'The [run](/docs/api-reference/runs/object) object matching the specified ID.' examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.retrieve(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" post: tags: - - Projects - summary: Modifies a project in the organization. - operationId: modify-project + - Assistants + summary: Modifies a run. + operationId: modifyRun + parameters: + - name: thread_id + in: path + description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' + required: true + schema: + type: string + - name: run_id + in: path + description: The ID of the run to modify. + required: true + schema: + type: string requestBody: - description: The project update request payload. content: application/json: schema: - $ref: '#/components/schemas/ProjectUpdateRequest' + $ref: '#/components/schemas/ModifyRunRequest' required: true responses: '200': - description: Project updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - '400': - description: Error response when updating the default project. 
+ description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Modify project - group: administration - returns: 'The updated [Project](/docs/api-reference/projects/object) object.' + name: Modify run + group: threads + beta: true + returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project DEF\"\n }'\n" - '/organization/projects/{project_id}/archive': + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n }\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.update(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n metadata={\"user_id\": \"user_abc123\"},\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.update(\n \"thread_abc123\",\n \"run_abc123\",\n {\n metadata: {\n user_id: \"user_abc123\",\n },\n }\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + '/threads/{thread_id}/runs/{run_id}/cancel': post: tags: - - Projects - summary: Archives a project in the organization. Archived projects cannot be used or updated. - operationId: archive-project + - Assistants + summary: Cancels a run that is `in_progress`. + operationId: cancelRun parameters: - - name: project_id + - name: thread_id in: path - description: The ID of the project. + description: The ID of the thread to which this run belongs. + required: true + schema: + type: string + - name: run_id + in: path + description: The ID of the run to cancel. required: true schema: type: string responses: '200': - description: Project archived successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/Project' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Archive project - group: administration - returns: 'The archived [Project](/docs/api-reference/projects/object) object.' 
+ name: Cancel a run + group: threads + beta: true + returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project DEF\",\n \"created_at\": 1711471533,\n \"archived_at\": 1711471533,\n \"status\": \"archived\"\n}\n" - '/organization/projects/{project_id}/users': + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.cancel(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.cancel(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076126,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"cancelling\",\n \"started_at\": 1699076126,\n \"expires_at\": 1699076726,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You summarize books.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + '/threads/{thread_id}/runs/{run_id}/steps': get: tags: - - Projects - summary: Returns a list of users in the project. - operationId: list-project-users + - Assistants + summary: Returns a list of run steps belonging to a run. + operationId: listRunSteps parameters: - - name: project_id + - name: thread_id in: path - description: The ID of the project. + description: The ID of the thread the run and run steps belong to. + required: true + schema: + type: string + - name: run_id + in: path + description: The ID of the run the run steps belong to. required: true schema: type: string @@ -3081,653 +3694,2844 @@ paths: schema: type: integer default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc - name: after in: query description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" schema: type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + schema: + type: string + - name: 'include[]' + in: query + description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" + schema: + type: array + items: + enum: + - 'step_details.tool_calls[*].file_search.results[*].content' + type: string responses: '200': - description: Project users listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserListResponse' - '400': - description: Error response when project is archived. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/ListRunStepsResponse' x-oaiMeta: - name: List project users - group: administration - returns: 'A list of [ProjectUser](/docs/api-reference/project-users/object) objects.' + name: List run steps + group: threads + beta: true + returns: 'A list of [run step](/docs/api-reference/run-steps/step-object) objects.' examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - post: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_steps = client.beta.threads.runs.steps.list(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run_steps)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.list(\n \"thread_abc123\",\n \"run_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n }\n ],\n \"first_id\": \"step_abc123\",\n \"last_id\": \"step_abc456\",\n \"has_more\": false\n}\n" + '/threads/{thread_id}/runs/{run_id}/steps/{step_id}': + get: tags: - - 
Projects - summary: Adds a user to the project. Users must already be members of the organization to be added to a project. - operationId: create-project-user + - Assistants + summary: Retrieves a run step. + operationId: getRunStep parameters: - - name: project_id + - name: thread_id in: path - description: The ID of the project. + description: The ID of the thread to which the run and run step belongs. required: true schema: type: string - requestBody: - description: The project user create request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserCreateRequest' - required: true + - name: run_id + in: path + description: The ID of the run to which the run step belongs. + required: true + schema: + type: string + - name: step_id + in: path + description: The ID of the run step to retrieve. + required: true + schema: + type: string + - name: 'include[]' + in: query + description: "A list of additional fields to include in the response. Currently the only supported value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file search result content.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" + schema: + type: array + items: + enum: + - 'step_details.tool_calls[*].file_search.results[*].content' + type: string responses: '200': - description: User added to project successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - '400': - description: Error response for various conditions. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/RunStepObject' x-oaiMeta: - name: Create project user - group: administration - returns: 'The created [ProjectUser](/docs/api-reference/project-users/object) object.' + name: Retrieve run step + group: threads + beta: true + returns: 'The [run step](/docs/api-reference/run-steps/step-object) object matching the specified ID.' 
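+      # A minimal sketch of calling this endpoint directly, assuming the third-party
+      # `requests` library and the placeholder IDs used in the examples below; the
+      # `include[]` value is the one documented for this parameter above.
+      #   import os, requests
+      #   resp = requests.get(
+      #       "https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123",
+      #       headers={
+      #           "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
+      #           "OpenAI-Beta": "assistants=v2",
+      #       },
+      #       params={"include[]": "step_details.tool_calls[*].file_search.results[*].content"},
+      #   )
+      #   resp.raise_for_status()
+      #   print(resp.json()["step_details"])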
examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"user_id\": \"user_abc\",\n \"role\": \"member\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - '/organization/projects/{project_id}/users/{user_id}': - get: + curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_step = client.beta.threads.runs.steps.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n step_id=\"step_abc123\"\n)\n\nprint(run_step)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.retrieve(\n \"thread_abc123\",\n \"run_abc123\",\n \"step_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" + response: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" + '/threads/{thread_id}/runs/{run_id}/submit_tool_outputs': + post: tags: - - Projects - summary: Retrieves a user in the project. - operationId: retrieve-project-user + - Assistants + summary: "When a run has the `status: \"requires_action\"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request.\n" + operationId: submitToolOuputsToRun parameters: - - name: project_id + - name: thread_id in: path - description: The ID of the project. + description: 'The ID of the [thread](/docs/api-reference/threads) to which this run belongs.' required: true schema: type: string - - name: user_id + - name: run_id in: path - description: The ID of the user. + description: The ID of the run that requires the tool output submission. required: true schema: type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SubmitToolOutputsRunRequest' + required: true responses: '200': - description: Project user retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectUser' + $ref: '#/components/schemas/RunObject' x-oaiMeta: - name: Retrieve project user - group: administration - returns: 'The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID.' + name: Submit tool outputs to run + group: threads + beta: true + returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
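+      # A minimal sketch of the `requires_action` flow described above, assuming the
+      # beta Python SDK methods shown in the examples in this file; `run_my_tool` is a
+      # hypothetical helper that executes one tool call and returns its string output.
+      #   import time
+      #   from openai import OpenAI
+      #   client = OpenAI()
+      #   run = client.beta.threads.runs.retrieve(thread_id="thread_123", run_id="run_123")
+      #   while run.status in ("queued", "in_progress"):
+      #       time.sleep(1)
+      #       run = client.beta.threads.runs.retrieve(thread_id="thread_123", run_id="run_123")
+      #   if run.status == "requires_action" and run.required_action.type == "submit_tool_outputs":
+      #       outputs = [
+      #           {"tool_call_id": call.id, "output": run_my_tool(call)}
+      #           for call in run.required_action.submit_tool_outputs.tool_calls
+      #       ]
+      #       # All outputs must be submitted in a single request.
+      #       run = client.beta.threads.runs.submit_tool_outputs(
+      #           thread_id="thread_123", run_id="run_123", tool_outputs=outputs
+      #       )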
examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + - title: Default + request: + curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n)\n\nprint(run)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n console.log(run);\n}\n\nmain();\n" + response: "{\n \"id\": \"run_123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075592,\n \"assistant_id\": \"asst_123\",\n \"thread_id\": \"thread_123\",\n \"status\": \"queued\",\n \"started_at\": 1699075592,\n \"expires_at\": 1699076192,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + - title: Streaming + request: + curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n \"stream\": true\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n stream=True\n)\n\nfor event in stream:\n print(event)\n" + node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" + response: "event: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710352449,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352475,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"id\":\"call_iWr0kQ2EaYMaxNdl0v3KYkx7\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\",\"output\":\"70 degrees and sunny.\"}}]},\"usage\":{\"prompt_tokens\":291,\"completion_tokens\":24,\"total_tokens\":315}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":1710352448,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710352475,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"The\",\"annotations\":[]}}]}}\n\nevent: thread.message.delta\ndata: 
{\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" current\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" weather\"}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" sunny\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\".\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710352477,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352477,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":{\"prompt_tokens\":329,\"completion_tokens\":18,\"total_tokens\":347}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710352475,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710352477,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" + /uploads: post: tags: - - Projects - summary: Modifies a user's role in the project. - operationId: modify-project-user + - Uploads + summary: "Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.\n\nOnce you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. 
This File is usable in the rest of our platform as a regular File object.\n\nFor certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:\n- [Assistants](/docs/assistants/tools/file-search#supported-files)\n\nFor guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).\n" + operationId: createUpload requestBody: - description: The project user update request payload. content: application/json: schema: - $ref: '#/components/schemas/ProjectUserUpdateRequest' + $ref: '#/components/schemas/CreateUploadRequest' required: true responses: '200': - description: Project user's role updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - '400': - description: Error response for various conditions. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/Upload' x-oaiMeta: - name: Modify project user - group: administration - returns: 'The updated [ProjectUser](/docs/api-reference/project-users/object) object.' + name: Create upload + group: uploads + returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `pending`.' examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - delete: + curl: "curl https://api.openai.com/v1/uploads \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"purpose\": \"fine-tune\",\n \"filename\": \"training_examples.jsonl\",\n \"bytes\": 2147483648,\n \"mime_type\": \"text/jsonl\"\n }'\n" + response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"pending\",\n \"expires_at\": 1719127296\n}\n" + '/uploads/{upload_id}/cancel': + post: tags: - - Projects - summary: Deletes a user from the project. - operationId: delete-project-user + - Uploads + summary: "Cancels the Upload. No Parts may be added after an Upload is cancelled.\n" + operationId: cancelUpload parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: user_id + - name: upload_id in: path - description: The ID of the user. + description: "The ID of the Upload.\n" required: true schema: type: string + example: upload_abc123 responses: '200': - description: Project user deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserDeleteResponse' - '400': - description: Error response for various conditions. 
+ description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/Upload' x-oaiMeta: - name: Delete project user - group: administration - returns: 'Confirmation that project has been deleted or an error in case of an archived project, which has no users' + name: Cancel upload + group: uploads + returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`.' examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" - '/organization/projects/{project_id}/service_accounts': - get: + curl: "curl https://api.openai.com/v1/uploads/upload_abc123/cancel\n" + response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"cancelled\",\n \"expires_at\": 1719127296\n}\n" + '/uploads/{upload_id}/complete': + post: tags: - - Projects - summary: Returns a list of service accounts in the project. - operationId: list-project-service-accounts + - Uploads + summary: "Completes the [Upload](/docs/api-reference/uploads/object). \n\nWithin the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform.\n\nYou can specify the order of the Parts by passing in an ordered list of the Part IDs.\n\nThe number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed.\n" + operationId: completeUpload parameters: - - name: project_id + - name: upload_id in: path - description: The ID of the project. + description: "The ID of the Upload.\n" required: true schema: type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string + example: upload_abc123 + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CompleteUploadRequest' + required: true responses: '200': - description: Project service accounts listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountListResponse' - '400': - description: Error response when project is archived. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/Upload' x-oaiMeta: - name: List project service accounts - group: administration - returns: 'A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects.' + name: Complete upload + group: uploads + returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object.' 
examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n ],\n \"first_id\": \"svc_acct_abc\",\n \"last_id\": \"svc_acct_xyz\",\n \"has_more\": false\n}\n" + curl: "curl https://api.openai.com/v1/uploads/upload_abc123/complete \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"part_ids\": [\"part_def456\", \"part_ghi789\"]\n }'\n" + response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\"\n }\n}\n" + '/uploads/{upload_id}/parts': post: tags: - - Projects - summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. - operationId: create-project-service-account + - Uploads + summary: "Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. \n\nEach Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB.\n\nIt is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete).\n" + operationId: addUploadPart parameters: - - name: project_id + - name: upload_id in: path - description: The ID of the project. + description: "The ID of the Upload.\n" required: true schema: type: string + example: upload_abc123 requestBody: - description: The project service account create request payload. content: - application/json: + multipart/form-data: schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' + $ref: '#/components/schemas/AddUploadPartRequest' required: true responses: '200': - description: Project service account created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' - '400': - description: Error response when project is archived. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/UploadPart' x-oaiMeta: - name: Create project service account - group: administration - returns: 'The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object.' + name: Add upload part + group: uploads + returns: 'The upload [Part](/docs/api-reference/uploads/part-object) object.'
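+      # A minimal sketch of the create -> add parts -> complete flow described above,
+      # driven over plain HTTP; the third-party `requests` library, the local file path,
+      # and the 64 MB chunking loop are assumptions, not part of this specification.
+      #   import os, requests
+      #   BASE = "https://api.openai.com/v1"
+      #   HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}
+      #   path = "training_examples.jsonl"  # hypothetical local file
+      #   upload = requests.post(f"{BASE}/uploads", headers=HEADERS, json={
+      #       "purpose": "fine-tune",
+      #       "filename": os.path.basename(path),
+      #       "bytes": os.path.getsize(path),
+      #       "mime_type": "text/jsonl",
+      #   }).json()
+      #   part_ids = []
+      #   with open(path, "rb") as f:
+      #       while chunk := f.read(64 * 1024 * 1024):  # each Part can be at most 64 MB
+      #           part = requests.post(f"{BASE}/uploads/{upload['id']}/parts",
+      #                                headers=HEADERS, files={"data": chunk}).json()
+      #           part_ids.append(part["id"])
+      #   # Completion fails if the uploaded bytes do not match the size declared above.
+      #   done = requests.post(f"{BASE}/uploads/{upload['id']}/complete",
+      #                        headers=HEADERS, json={"part_ids": part_ids}).json()
+      #   print(done["status"], done["file"]["id"])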
examples: request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Production App\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Production App\",\n \"role\": \"member\",\n \"created_at\": 1711471533,\n \"api_key\": {\n \"object\": \"organization.project.service_account.api_key\",\n \"value\": \"sk-abcdefghijklmnop123\",\n \"name\": \"Secret Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\"\n }\n}\n" - '/organization/projects/{project_id}/service_accounts/{service_account_id}': + curl: "curl https://api.openai.com/v1/uploads/upload_abc123/parts\n -F data=\"aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz...\"\n" + response: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719185911,\n \"upload_id\": \"upload_abc123\"\n}\n" + /vector_stores: get: tags: - - Projects - summary: Retrieves a service account in the project. - operationId: retrieve-project-service-account + - Vector stores + summary: Returns a list of vector stores. + operationId: listVectorStores parameters: - - name: project_id - in: path - description: The ID of the project. - required: true + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" schema: + type: integer + default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc type: string - - name: service_account_id - in: path - description: The ID of the service account. - required: true + default: desc + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" schema: type: string responses: '200': - description: Project service account retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectServiceAccount' + $ref: '#/components/schemas/ListVectorStoresResponse' x-oaiMeta: - name: Retrieve project service account - group: administration - returns: 'The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID.' + name: List vector stores + group: vector_stores + beta: true + returns: 'A list of [vector store](/docs/api-reference/vector-stores/object) objects.' 
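+      # A minimal sketch of cursor pagination with the `limit`, `order`, and `after`
+      # parameters defined above, assuming the third-party `requests` library; the field
+      # names (`data`, `has_more`, `last_id`) follow the list response shown below.
+      #   import os, requests
+      #   url = "https://api.openai.com/v1/vector_stores"
+      #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
+      #              "OpenAI-Beta": "assistants=v2"}
+      #   params = {"limit": 20, "order": "desc"}
+      #   while True:
+      #       page = requests.get(url, headers=headers, params=params).json()
+      #       for vs in page["data"]:
+      #           print(vs["id"], vs["name"])
+      #       if not page["has_more"]:
+      #           break
+      #       params["after"] = page["last_id"]  # resume after the last object of this page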
examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" - delete: + curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_stores = client.beta.vector_stores.list()\nprint(vector_stores)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStores = await openai.beta.vectorStores.list();\n console.log(vectorStores);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n },\n {\n \"id\": \"vs_abc456\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ v2\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n }\n ],\n \"first_id\": \"vs_abc123\",\n \"last_id\": \"vs_abc456\",\n \"has_more\": false\n}\n" + post: tags: - - Projects - summary: Deletes a service account from the project. - operationId: delete-project-service-account + - Vector stores + summary: Create a vector store. + operationId: createVectorStore + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateVectorStoreRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreObject' + x-oaiMeta: + name: Create vector store + group: vector_stores + beta: true + returns: 'A [vector store](/docs/api-reference/vector-stores/object) object.' + examples: + request: + curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.create(\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.create({\n name: \"Support FAQ\"\n });\n console.log(vectorStore);\n}\n\nmain();\n" + response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" + '/vector_stores/{vector_store_id}': + get: + tags: + - Vector stores + summary: Retrieves a vector store. + operationId: getVectorStore parameters: - - name: project_id - in: path - description: The ID of the project. 
- required: true - schema: - type: string - - name: service_account_id + - name: vector_store_id in: path - description: The ID of the service account. + description: The ID of the vector store to retrieve. required: true schema: type: string responses: '200': - description: Project service account deleted successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' + $ref: '#/components/schemas/VectorStoreObject' x-oaiMeta: - name: Delete project service account - group: administration - returns: 'Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts' + name: Retrieve vector store + group: vector_stores + beta: true + returns: 'The [vector store](/docs/api-reference/vector-stores/object) object matching the specified ID.' examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.service_account.deleted\",\n \"id\": \"svc_acct_abc\",\n \"deleted\": true\n}\n" - '/organization/projects/{project_id}/api_keys': - get: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.retrieve(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.retrieve(\n \"vs_abc123\"\n );\n console.log(vectorStore);\n}\n\nmain();\n" + response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776\n}\n" + post: tags: - - Projects - summary: Returns a list of API keys in the project. - operationId: list-project-api-keys + - Vector stores + summary: Modifies a vector store. + operationId: modifyVectorStore parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store to modify. required: true schema: type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateVectorStoreRequest' + required: true responses: '200': - description: Project API keys listed successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectApiKeyListResponse' + $ref: '#/components/schemas/VectorStoreObject' x-oaiMeta: - name: List project API keys - group: administration - returns: 'A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects.' 
+ name: Modify vector store + group: vector_stores + beta: true + returns: 'The modified [vector store](/docs/api-reference/vector-stores/object) object.' examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n }\n ],\n \"first_id\": \"key_abc\",\n \"last_id\": \"key_xyz\",\n \"has_more\": false\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - '/organization/projects/{project_id}/api_keys/{key_id}': - get: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.update(\n vector_store_id=\"vs_abc123\",\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.update(\n \"vs_abc123\",\n {\n name: \"Support FAQ\"\n }\n );\n console.log(vectorStore);\n}\n\nmain();\n" + response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" + delete: tags: - - Projects - summary: Retrieves an API key in the project. - operationId: retrieve-project-api-key + - Vector stores + summary: Delete a vector store. + operationId: deleteVectorStore parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store to delete. 
required: true schema: type: string - - name: key_id + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteVectorStoreResponse' + x-oaiMeta: + name: Delete vector store + group: vector_stores + beta: true + returns: Deletion status + examples: + request: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store = client.beta.vector_stores.delete(\n vector_store_id=\"vs_abc123\"\n)\nprint(deleted_vector_store)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStore = await openai.beta.vectorStores.del(\n \"vs_abc123\"\n );\n console.log(deletedVectorStore);\n}\n\nmain();\n" + response: "{\n id: \"vs_abc123\",\n object: \"vector_store.deleted\",\n deleted: true\n}\n" + '/vector_stores/{vector_store_id}/file_batches': + post: + tags: + - Vector stores + summary: Create a vector store file batch. + operationId: createVectorStoreFileBatch + parameters: + - name: vector_store_id in: path - description: The ID of the API key. + description: "The ID of the vector store for which to create a File Batch.\n" required: true schema: type: string + example: vs_abc123 + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateVectorStoreFileBatchRequest' + required: true responses: '200': - description: Project API key retrieved successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectApiKey' + $ref: '#/components/schemas/VectorStoreFileBatchObject' x-oaiMeta: - name: Retrieve project API key - group: administration - returns: 'The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID.' + name: Create vector store file batch + group: vector_stores + beta: true + returns: 'A [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' 
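+      # A minimal sketch of creating a file batch and polling it until indexing finishes,
+      # using the beta Python SDK calls shown in the examples in this section; the
+      # one-second poll interval is an arbitrary choice, not an API requirement.
+      #   import time
+      #   from openai import OpenAI
+      #   client = OpenAI()
+      #   batch = client.beta.vector_stores.file_batches.create(
+      #       vector_store_id="vs_abc123",
+      #       file_ids=["file-abc123", "file-abc456"],
+      #   )
+      #   while batch.status == "in_progress":
+      #       time.sleep(1)
+      #       batch = client.beta.vector_stores.file_batches.retrieve(
+      #           vector_store_id="vs_abc123",
+      #           batch_id=batch.id,
+      #       )
+      #   print(batch.status, batch.file_counts)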
examples: request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" - delete: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_ids\": [\"file-abc123\", \"file-abc456\"]\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.create(\n vector_store_id=\"vs_abc123\",\n file_ids=[\"file-abc123\", \"file-abc456\"]\n)\nprint(vector_store_file_batch)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(\n \"vs_abc123\",\n {\n file_ids: [\"file-abc123\", \"file-abc456\"]\n }\n );\n console.log(myVectorStoreFileBatch);\n}\n\nmain();\n" + response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" + '/vector_stores/{vector_store_id}/file_batches/{batch_id}': + get: tags: - - Projects - summary: Deletes an API key from the project. - operationId: delete-project-api-key + - Vector stores + summary: Retrieves a vector store file batch. + operationId: getVectorStoreFileBatch parameters: - - name: project_id + - name: vector_store_id in: path - description: The ID of the project. + description: The ID of the vector store that the file batch belongs to. required: true schema: type: string - - name: key_id + example: vs_abc123 + - name: batch_id in: path - description: The ID of the API key. + description: The ID of the file batch being retrieved. required: true schema: type: string + example: vsfb_abc123 responses: '200': - description: Project API key deleted successfully. + description: OK content: application/json: schema: - $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' - '400': - description: Error response for various conditions. + $ref: '#/components/schemas/VectorStoreFileBatchObject' + x-oaiMeta: + name: Retrieve vector store file batch + group: vector_stores + beta: true + returns: 'The [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' 
+ examples: + request: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.retrieve(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_file_batch)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFileBatch);\n}\n\nmain();\n" + response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" + '/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel': + post: + tags: + - Vector stores + summary: Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. + operationId: cancelVectorStoreFileBatch + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store that the file batch belongs to. + required: true + schema: + type: string + - name: batch_id + in: path + description: The ID of the file batch to cancel. + required: true + schema: + type: string + responses: + '200': + description: OK content: application/json: schema: - $ref: '#/components/schemas/ErrorResponse' + $ref: '#/components/schemas/VectorStoreFileBatchObject' x-oaiMeta: - name: Delete project API key - group: administration - returns: Confirmation of the key's deletion or an error if the key belonged to a service account + name: Cancel vector store file batch + group: vector_stores + beta: true + returns: The modified vector store file batch object. 
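The returned batch object carries `status` and `file_counts`, so callers typically poll the retrieve endpoint and fall back to this cancel endpoint if processing takes too long. A minimal sketch, assuming placeholder IDs, an arbitrary five-minute deadline, and the keyword arguments used in this spec's own Python examples.

```python
# Sketch: poll a vector store file batch until it leaves "in_progress",
# cancelling it if processing exceeds a chosen deadline. IDs and the
# timeout are placeholders.
import time
from openai import OpenAI

client = OpenAI()

deadline = time.time() + 300  # give the batch up to 5 minutes
while True:
    batch = client.beta.vector_stores.file_batches.retrieve(
        vector_store_id="vs_abc123",
        batch_id="vsfb_abc123",
    )
    if batch.status in ("completed", "failed", "cancelled"):
        break
    if time.time() > deadline:
        # Request cancellation; the batch moves to "cancelling" and then "cancelled".
        batch = client.beta.vector_stores.file_batches.cancel(
            vector_store_id="vs_abc123",
            file_batch_id="vsfb_abc123",
        )
        break
    time.sleep(2)

print(batch.status, batch.file_counts)
```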
examples: request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.api_key.deleted\",\n \"id\": \"key_abc\",\n \"deleted\": true\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"API keys cannot be deleted for service accounts, please delete the service account\"\n} \n" -components: - schemas: - Error: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel(\n vector_store_id=\"vs_abc123\",\n file_batch_id=\"vsfb_abc123\"\n)\nprint(deleted_vector_store_file_batch)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(deletedVectorStoreFileBatch);\n}\n\nmain();\n" + response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"cancelling\",\n \"file_counts\": {\n \"in_progress\": 12,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 15,\n }\n}\n" + '/vector_stores/{vector_store_id}/file_batches/{batch_id}/files': + get: + tags: + - Vector stores + summary: Returns a list of vector store files in a batch. + operationId: listFilesInVectorStoreBatch + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store that the files belong to. + required: true + schema: + type: string + - name: batch_id + in: path + description: The ID of the file batch that the files belong to. + required: true + schema: + type: string + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + schema: + type: string + - name: filter + in: query + description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' 
+ schema: + enum: + - in_progress + - completed + - failed + - cancelled + type: string + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListVectorStoreFilesResponse' + x-oaiMeta: + name: List vector store files in a batch + group: vector_stores + beta: true + returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.file_batches.list_files(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_files)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" + '/vector_stores/{vector_store_id}/files': + get: + tags: + - Vector stores + summary: Returns a list of vector store files. + operationId: listVectorStoreFiles + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store that the files belong to. + required: true + schema: + type: string + - name: limit + in: query + description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" + schema: + type: integer + default: 20 + - name: order + in: query + description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" + schema: + enum: + - asc + - desc + type: string + default: desc + - name: after + in: query + description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" + schema: + type: string + - name: before + in: query + description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" + schema: + type: string + - name: filter + in: query + description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' 
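The `filter` query parameter above narrows the listing to a single file status. A sketch that pulls only the failed files of a batch so they can be re-uploaded, assuming the SDK forwards `filter` as a keyword argument; the IDs are placeholders.

```python
# Sketch: list only the files that failed processing in a given batch,
# assuming the `filter` query parameter above is exposed as a keyword argument.
from openai import OpenAI

client = OpenAI()

failed = client.beta.vector_stores.file_batches.list_files(
    vector_store_id="vs_abc123",
    batch_id="vsfb_abc123",
    filter="failed",
)
for vs_file in failed.data:
    print(vs_file.id)
```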
+ schema: + enum: + - in_progress + - completed + - failed + - cancelled + type: string + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListVectorStoreFilesResponse' + x-oaiMeta: + name: List vector store files + group: vector_stores + beta: true + returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' + examples: + request: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.files.list(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store_files)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.files.list(\n \"vs_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" + response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" + post: + tags: + - Vector stores + summary: 'Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).' + operationId: createVectorStoreFile + parameters: + - name: vector_store_id + in: path + description: "The ID of the vector store for which to create a File.\n" + required: true + schema: + type: string + example: vs_abc123 + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateVectorStoreFileRequest' + required: true + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreFileObject' + x-oaiMeta: + name: Create vector store file + group: vector_stores + beta: true + returns: 'A [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' + examples: + request: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_id\": \"file-abc123\"\n }'\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.create(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFile = await openai.beta.vectorStores.files.create(\n \"vs_abc123\",\n {\n file_id: \"file-abc123\"\n }\n );\n console.log(myVectorStoreFile);\n}\n\nmain();\n" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"usage_bytes\": 1234,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" + '/vector_stores/{vector_store_id}/files/{file_id}': + get: + tags: + - Vector stores + summary: Retrieves a vector store file. 
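The `limit`, `after`, and `has_more` fields described above implement cursor pagination. A sketch that walks every file in a vector store, assuming the returned page object exposes the list response's `data` and `has_more` fields; the vector store ID is a placeholder.

```python
# Sketch: walk all files in a vector store using the cursor pagination
# described above (`limit`, `after`, `has_more`).
from openai import OpenAI

client = OpenAI()

kwargs = {"vector_store_id": "vs_abc123", "limit": 100}
while True:
    page = client.beta.vector_stores.files.list(**kwargs)
    for vs_file in page.data:
        print(vs_file.id)
    if not page.has_more:
        break
    # The last object ID becomes the `after` cursor for the next request.
    kwargs["after"] = page.data[-1].id
```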
+ operationId: getVectorStoreFile + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store that the file belongs to. + required: true + schema: + type: string + example: vs_abc123 + - name: file_id + in: path + description: The ID of the file being retrieved. + required: true + schema: + type: string + example: file-abc123 + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreFileObject' + x-oaiMeta: + name: Retrieve vector store file + group: vector_stores + beta: true + returns: 'The [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' + examples: + request: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.retrieve(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFile = await openai.beta.vectorStores.files.retrieve(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(vectorStoreFile);\n}\n\nmain();\n" + response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" + delete: + tags: + - Vector stores + summary: 'Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.' + operationId: deleteVectorStoreFile + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store that the file belongs to. + required: true + schema: + type: string + - name: file_id + in: path + description: The ID of the file to delete. 
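As the delete description above notes, removing a file from a vector store leaves the underlying File in place. A sketch of the two-step cleanup, with placeholder IDs.

```python
# Sketch: fully remove a document. First detach it from the vector store,
# then delete the underlying File object via the separate /files delete endpoint.
from openai import OpenAI

client = OpenAI()

client.beta.vector_stores.files.delete(
    vector_store_id="vs_abc123",
    file_id="file-abc123",
)
client.files.delete("file-abc123")
```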
+ required: true + schema: + type: string + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteVectorStoreFileResponse' + x-oaiMeta: + name: Delete vector store file + group: vector_stores + beta: true + returns: Deletion status + examples: + request: + curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" + python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file = client.beta.vector_stores.files.delete(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(deleted_vector_store_file)\n" + node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFile = await openai.beta.vectorStores.files.del(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(deletedVectorStoreFile);\n}\n\nmain();\n" + response: "{\n id: \"file-abc123\",\n object: \"vector_store.file.deleted\",\n deleted: true\n}\n" +components: + schemas: + AddUploadPartRequest: + required: + - data + type: object + properties: + data: + type: string + description: "The chunk of bytes for this Part.\n" + format: binary + additionalProperties: false + AssistantObject: + title: Assistant + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - metadata + type: object + properties: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' + object: + enum: + - assistant + type: string + description: 'The object type, which is always `assistant`.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the assistant was created. + name: + maxLength: 256 + type: string + description: "The name of the assistant. The maximum length is 256 characters.\n" + nullable: true + description: + maxLength: 512 + type: string + description: "The description of the assistant. The maximum length is 512 characters.\n" + nullable: true + model: + type: string + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" + instructions: + maxLength: 256000 + type: string + description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" + nullable: true + tools: + maxItems: 128 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/AssistantToolsCode' + file_search: '#/components/schemas/AssistantToolsFileSearch' + function: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. 
There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" + default: 1 + nullable: true + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" + default: 1 + nullable: true + example: 1 + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + description: Represents an `assistant` that can call the model and use tools. + x-oaiMeta: + name: The assistant object + beta: true + example: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" + AssistantStreamEvent: + oneOf: + - $ref: '#/components/schemas/ErrorEvent' + - $ref: '#/components/schemas/DoneEvent' + - required: + - event + - data + type: object + properties: + enabled: + type: boolean + description: Whether to enable input audio transcription. + event: + enum: + - thread.created + type: string + data: + $ref: '#/components/schemas/ThreadObject' + description: 'Occurs when a new [thread](/docs/api-reference/threads/object) is created.' + x-oaiMeta: + dataDescription: '`data` is a [thread](/docs/api-reference/threads/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.created + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a new [run](/docs/api-reference/runs/object) is created.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.queued + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status.' 
+ x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.in_progress + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.requires_action + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.completed + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) is completed.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.incomplete + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.failed + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) fails.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.cancelling + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.cancelled + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) is cancelled.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.expired + type: string + data: + $ref: '#/components/schemas/RunObject' + description: 'Occurs when a [run](/docs/api-reference/runs/object) expires.' + x-oaiMeta: + dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.step.created + type: string + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is created.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.step.in_progress + type: string + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) moves to an `in_progress` state.' 
+ x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.step.delta + type: string + data: + $ref: '#/components/schemas/RunStepDeltaObject' + description: 'Occurs when parts of a [run step](/docs/api-reference/run-steps/step-object) are being streamed.' + x-oaiMeta: + dataDescription: '`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.step.completed + type: string + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is completed.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.step.failed + type: string + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) fails.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.step.cancelled + type: string + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) is cancelled.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.run.step.expired + type: string + data: + $ref: '#/components/schemas/RunStepObject' + description: 'Occurs when a [run step](/docs/api-reference/run-steps/step-object) expires.' + x-oaiMeta: + dataDescription: '`data` is a [run step](/docs/api-reference/run-steps/step-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.created + type: string + data: + $ref: '#/components/schemas/MessageObject' + description: 'Occurs when a [message](/docs/api-reference/messages/object) is created.' + x-oaiMeta: + dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.in_progress + type: string + data: + $ref: '#/components/schemas/MessageObject' + description: 'Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state.' + x-oaiMeta: + dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.delta + type: string + data: + $ref: '#/components/schemas/MessageDeltaObject' + description: 'Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed.' + x-oaiMeta: + dataDescription: '`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.completed + type: string + data: + $ref: '#/components/schemas/MessageObject' + description: 'Occurs when a [message](/docs/api-reference/messages/object) is completed.' 
+ x-oaiMeta: + dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' + - required: + - event + - data + type: object + properties: + event: + enum: + - thread.message.incomplete + type: string + data: + $ref: '#/components/schemas/MessageObject' + description: 'Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed.' + x-oaiMeta: + dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' + description: "Represents an event emitted when streaming a Run.\n\nEach event in a server-sent events stream has an `event` and `data` property:\n\n```\nevent: thread.created\ndata: {\"id\": \"thread_123\", \"object\": \"thread\", ...}\n```\n\nWe emit events whenever a new object is created, transitions to a new state, or is being\nstreamed in parts (deltas). For example, we emit `thread.run.created` when a new run\nis created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses\nto create a message during a run, we emit a `thread.message.created event`, a\n`thread.message.in_progress` event, many `thread.message.delta` events, and finally a\n`thread.message.completed` event.\n\nWe may add additional events over time, so we recommend handling unknown events gracefully\nin your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to\nintegrate the Assistants API with streaming.\n" + discriminator: + propertyName: event + mapping: + error: '#/components/schemas/ErrorEvent' + done: '#/components/schemas/DoneEvent' + thread.created: '#/components/schemas/AssistantStreamEvent/oneOf/2' + thread.run.created: '#/components/schemas/AssistantStreamEvent/oneOf/3' + thread.run.queued: '#/components/schemas/AssistantStreamEvent/oneOf/4' + thread.run.in_progress: '#/components/schemas/AssistantStreamEvent/oneOf/5' + thread.run.requires_action: '#/components/schemas/AssistantStreamEvent/oneOf/6' + thread.run.completed: '#/components/schemas/AssistantStreamEvent/oneOf/7' + thread.run.incomplete: '#/components/schemas/AssistantStreamEvent/oneOf/8' + thread.run.failed: '#/components/schemas/AssistantStreamEvent/oneOf/9' + thread.run.cancelling: '#/components/schemas/AssistantStreamEvent/oneOf/10' + thread.run.cancelled: '#/components/schemas/AssistantStreamEvent/oneOf/11' + thread.run.expired: '#/components/schemas/AssistantStreamEvent/oneOf/12' + thread.run.step.created: '#/components/schemas/AssistantStreamEvent/oneOf/13' + thread.run.step.in_progress: '#/components/schemas/AssistantStreamEvent/oneOf/14' + thread.run.step.delta: '#/components/schemas/AssistantStreamEvent/oneOf/15' + thread.run.step.completed: '#/components/schemas/AssistantStreamEvent/oneOf/16' + thread.run.step.failed: '#/components/schemas/AssistantStreamEvent/oneOf/17' + thread.run.step.cancelled: '#/components/schemas/AssistantStreamEvent/oneOf/18' + thread.run.step.expired: '#/components/schemas/AssistantStreamEvent/oneOf/19' + thread.message.created: '#/components/schemas/AssistantStreamEvent/oneOf/20' + thread.message.in_progress: '#/components/schemas/AssistantStreamEvent/oneOf/21' + thread.message.delta: '#/components/schemas/AssistantStreamEvent/oneOf/22' + thread.message.completed: '#/components/schemas/AssistantStreamEvent/oneOf/23' + thread.message.incomplete: '#/components/schemas/AssistantStreamEvent/oneOf/24' + x-oaiMeta: + name: Assistant stream events + beta: true + AssistantToolsCode: + title: Code interpreter tool + required: + - type + type: object + properties: + type: + enum: + - 
code_interpreter + type: string + description: 'The type of tool being defined: `code_interpreter`' + AssistantToolsFileSearch: + title: FileSearch tool + required: + - type + type: object + properties: + type: + enum: + - file_search + type: string + description: 'The type of tool being defined: `file_search`' + file_search: + type: object + properties: + max_num_results: + maximum: 50 + minimum: 1 + type: integer + description: "The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.\n\nNote that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" + ranking_options: + $ref: '#/components/schemas/FileSearchRankingOptions' + description: Overrides for the file search tool. + AssistantToolsFileSearchTypeOnly: + title: FileSearch tool + required: + - type + type: object + properties: + type: + enum: + - file_search + type: string + description: 'The type of tool being defined: `file_search`' + AssistantToolsFunction: + title: Function tool + required: + - type + - function + type: object + properties: + type: + enum: + - function + type: string + description: 'The type of tool being defined: `function`' + function: + $ref: '#/components/schemas/FunctionObject' + AssistantsApiResponseFormatOption: + oneOf: + - enum: + - auto + type: string + description: "`auto` is the default value\n" + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' + description: "Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" + x-oaiExpandable: true + AssistantsApiToolChoiceOption: + oneOf: + - enum: + - none + - auto + - required + type: string + description: "`none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools before responding to the user.\n" + - $ref: '#/components/schemas/AssistantsNamedToolChoice' + description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tools and instead generates a message.\n`auto` is the default value and means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools before responding to the user.\nSpecifying a particular tool like `{\"type\": \"file_search\"}` or `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n" + x-oaiExpandable: true + AssistantsNamedToolChoice: + required: + - type + type: object + properties: + type: + enum: + - function + - code_interpreter + - file_search + type: string + description: 'The type of the tool. If type is `function`, the function name must be set' + function: + required: + - name + type: object + properties: + name: + type: string + description: The name of the function to call. + description: Specifies a tool the model should use. Use to force the model to call a specific tool. + AudioResponseFormat: + enum: + - json + - text + - srt + - verbose_json + - vtt + type: string + description: "The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" + default: json + AuditLog: + required: + - id + - type + - effective_at + - actor + type: object + properties: + id: + type: string + description: The ID of this log. + type: + $ref: '#/components/schemas/AuditLogEventType' + effective_at: + type: integer + description: The Unix timestamp (in seconds) of the event. + project: + type: object + properties: + id: + type: string + description: The project ID. + name: + type: string + description: The project title. + description: The project that the action was scoped to. Absent for actions not scoped to projects. + actor: + $ref: '#/components/schemas/AuditLogActor' + api_key.created: + type: object + properties: + id: + type: string + description: The tracking ID of the API key. + data: + type: object + properties: + scopes: + type: array + items: + type: string + description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' + description: The payload used to create the API key. + description: The details for events with this `type`. + api_key.updated: + type: object + properties: + id: + type: string + description: The tracking ID of the API key. + changes_requested: + type: object + properties: + scopes: + type: array + items: + type: string + description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' + description: The payload used to update the API key. + description: The details for events with this `type`. + api_key.deleted: + type: object + properties: + id: + type: string + description: The tracking ID of the API key. + description: The details for events with this `type`. + invite.sent: + type: object + properties: + id: + type: string + description: The ID of the invite. + data: + type: object + properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or `member`. + description: The payload used to create the invite. + description: The details for events with this `type`. + invite.accepted: + type: object + properties: + id: + type: string + description: The ID of the invite. 
+ description: The details for events with this `type`. + invite.deleted: + type: object + properties: + id: + type: string + description: The ID of the invite. + description: The details for events with this `type`. + login.failed: + type: object + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + description: The details for events with this `type`. + logout.failed: + type: object + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + description: The details for events with this `type`. + organization.updated: + type: object + properties: + id: + type: string + description: The organization ID. + changes_requested: + type: object + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. + name: + type: string + description: The organization name. + settings: + type: object + properties: + threads_ui_visibility: + type: string + description: 'Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.' + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. + description: The payload used to update the organization settings. + description: The details for events with this `type`. + project.created: + type: object + properties: + id: + type: string + description: The project ID. + data: + type: object + properties: + name: + type: string + description: The project name. + title: + type: string + description: The title of the project as seen on the dashboard. + description: The payload used to create the project. + description: The details for events with this `type`. + project.updated: + type: object + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + description: The payload used to update the project. + description: The details for events with this `type`. + project.archived: + type: object + properties: + id: + type: string + description: The project ID. + description: The details for events with this `type`. + rate_limit.updated: + type: object + properties: + id: + type: string + description: The rate limit ID + changes_requested: + type: object + properties: + max_requests_per_1_minute: + type: integer + description: The maximum requests per minute. + max_tokens_per_1_minute: + type: integer + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only relevant for certain models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only relevant for certain models. + max_requests_per_1_day: + type: integer + description: The maximum requests per day. Only relevant for certain models. + batch_1_day_max_input_tokens: + type: integer + description: The maximum batch input tokens per day. Only relevant for certain models. + description: The payload used to update the rate limits. + description: The details for events with this `type`. 
+ rate_limit.deleted: + type: object + properties: + id: + type: string + description: The rate limit ID + description: The details for events with this `type`. + service_account.created: + type: object + properties: + id: + type: string + description: The service account ID. + data: + type: object + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + description: The payload used to create the service account. + description: The details for events with this `type`. + service_account.updated: + type: object + properties: + id: + type: string + description: The service account ID. + changes_requested: + type: object + properties: + role: + type: string + description: The role of the service account. Is either `owner` or `member`. + description: The payload used to updated the service account. + description: The details for events with this `type`. + service_account.deleted: + type: object + properties: + id: + type: string + description: The service account ID. + description: The details for events with this `type`. + user.added: + type: object + properties: + id: + type: string + description: The user ID. + data: + type: object + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + description: The payload used to add the user to the project. + description: The details for events with this `type`. + user.updated: + type: object + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + properties: + role: + type: string + description: The role of the user. Is either `owner` or `member`. + description: The payload used to update the user. + description: The details for events with this `type`. + user.deleted: + type: object + properties: + id: + type: string + description: The user ID. + description: The details for events with this `type`. + description: A log of a user action or configuration change within this organization. + x-oaiMeta: + name: The audit log object + example: "{\n \"id\": \"req_xxx_20240101\",\n \"type\": \"api_key.created\",\n \"effective_at\": 1720804090,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.created\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource.operation\"]\n }\n }\n}\n" + AuditLogActor: + type: object + properties: + type: + enum: + - session + - api_key + type: string + description: The type of actor. Is either `session` or `api_key`. + session: + $ref: '#/components/schemas/AuditLogActorSession' + api_key: + $ref: '#/components/schemas/AuditLogActorApiKey' + description: The actor who performed the audit logged action. + AuditLogActorApiKey: + type: object + properties: + id: + type: string + description: The tracking id of the API key. + type: + enum: + - user + - service_account + type: string + description: The type of API key. Can be either `user` or `service_account`. + user: + $ref: '#/components/schemas/AuditLogActorUser' + service_account: + $ref: '#/components/schemas/AuditLogActorServiceAccount' + description: The API Key used to perform the audit logged action. + AuditLogActorServiceAccount: + type: object + properties: + id: + type: string + description: The service account id. 
+ description: The service account that performed the audit logged action. + AuditLogActorSession: + type: object + properties: + user: + $ref: '#/components/schemas/AuditLogActorUser' + ip_address: + type: string + description: The IP address from which the action was performed. + description: The session in which the audit logged action was performed. + AuditLogActorUser: + type: object + properties: + id: + type: string + description: The user id. + email: + type: string + description: The user email. + description: The user who performed the audit logged action. + AuditLogEventType: + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - rate_limit.updated + - rate_limit.deleted + - user.added + - user.updated + - user.deleted + type: string + description: The event type. + x-oaiExpandable: true + AutoChunkingStrategyRequestParam: + title: Auto Chunking Strategy + required: + - type + type: object + properties: + type: + enum: + - auto + type: string + description: Always `auto`. + additionalProperties: false + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + Batch: + required: + - id + - object + - endpoint + - input_file_id + - completion_window + - status + - created_at + type: object + properties: + id: + type: string + object: + enum: + - batch + type: string + description: 'The object type, which is always `batch`.' + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + errors: + type: object + properties: + object: + type: string + description: 'The object type, which is always `list`.' + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: 'The name of the parameter that caused the error, if applicable.' + nullable: true + line: + type: integer + description: 'The line number of the input file where the error occurred, if applicable.' + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + type: string + description: The current status of the batch. + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started finalizing. 
+ completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started cancelling. + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: + required: + - total + - completed + - failed + type: object + properties: + total: + type: integer + description: Total number of requests in the batch. + completed: + type: integer + description: Number of requests that have been completed successfully. + failed: + type: integer + description: Number of requests that have failed. + description: The request counts for different statuses within the batch. + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + x-oaiMeta: + name: The batch object + example: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" + BatchRequestInput: + type: object + properties: + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. + method: + enum: + - POST + type: string + description: The HTTP method to be used for the request. Currently only `POST` is supported. + url: + type: string + description: 'The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.' + description: The per-line object of the batch input file + x-oaiMeta: + name: The request input object + example: "{\"custom_id\": \"request-1\", \"method\": \"POST\", \"url\": \"/v1/chat/completions\", \"body\": {\"model\": \"gpt-4o-mini\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"What is 2+2?\"}]}}\n" + BatchRequestOutput: + type: object + properties: + id: + type: string + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. + response: + type: object + properties: + status_code: + type: integer + description: The HTTP status code of the response + request_id: + type: string + description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. 
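Each line of a batch input file is one `BatchRequestInput` object like the example above. A sketch that writes a small JSONL input file, uploads it with `purpose="batch"`, and starts a batch against `/v1/chat/completions`; the prompts and model choice are placeholders.

```python
# Sketch: build a JSONL batch input file (one BatchRequestInput per line),
# upload it, and create a batch for /v1/chat/completions.
import json
from openai import OpenAI

client = OpenAI()

batch_requests = [
    {
        "custom_id": f"request-{i}",  # must be unique within the batch
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": prompt}],
        },
    }
    for i, prompt in enumerate(["What is 2+2?", "Name a prime number."], start=1)
]

with open("batch_input.jsonl", "w") as f:
    for request in batch_requests:
        f.write(json.dumps(request) + "\n")

batch_file = client.files.create(file=open("batch_input.jsonl", "rb"), purpose="batch")
batch = client.batches.create(
    input_file_id=batch_file.id,
    endpoint="/v1/chat/completions",
    completion_window="24h",
)
print(batch.id, batch.status)
```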
+ body: + type: object + description: The JSON body of the response + x-oaiTypeLabel: map + nullable: true + error: + type: object + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + description: 'For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.' + nullable: true + description: The per-line object of the batch output and error files + x-oaiMeta: + name: The request output object + example: "{\"id\": \"batch_req_wnaDys\", \"custom_id\": \"request-2\", \"response\": {\"status_code\": 200, \"request_id\": \"req_c187b3\", \"body\": {\"id\": \"chatcmpl-9758Iw\", \"object\": \"chat.completion\", \"created\": 1711475054, \"model\": \"gpt-4o-mini\", \"choices\": [{\"index\": 0, \"message\": {\"role\": \"assistant\", \"content\": \"2 + 2 equals 4.\"}, \"finish_reason\": \"stop\"}], \"usage\": {\"prompt_tokens\": 24, \"completion_tokens\": 15, \"total_tokens\": 39}, \"system_fingerprint\": null}}, \"error\": null}\n" + CancelUploadRequest: + type: object + additionalProperties: false + ChatCompletionFunctionCallOption: + required: + - name + type: object + properties: + name: + type: string + description: The name of the function to call. + description: "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n" + ChatCompletionFunctions: + required: + - name + type: object + properties: + description: + type: string + description: 'A description of what the function does, used by the model to choose when and how to call the function.' + name: + type: string + description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' + parameters: + $ref: '#/components/schemas/FunctionParameters' + deprecated: true + ChatCompletionMessageToolCall: + required: + - id + - type + - function + type: object + properties: + id: + type: string + description: The ID of the tool call. + type: + enum: + - function + type: string + description: 'The type of the tool. Currently, only `function` is supported.' + function: + required: + - name + - arguments + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + description: The function that the model called. + ChatCompletionMessageToolCallChunk: + required: + - index + type: object + properties: + index: + type: integer + id: + type: string + description: The ID of the tool call. + type: + enum: + - function + type: string + description: 'The type of the tool. Currently, only `function` is supported.' + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' 
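When streaming, tool calls arrive as `ChatCompletionMessageToolCallChunk` deltas keyed by `index`, with `function.arguments` split across chunks. A sketch that stitches the fragments back together; the `get_weather` tool definition is a hypothetical example.

```python
# Sketch: reassemble streamed tool calls from ChatCompletionMessageToolCallChunk
# deltas by concatenating `function.arguments` fragments per `index`.
import json
from openai import OpenAI

client = OpenAI()

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical function
        "description": "Get the weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=tools,
    stream=True,
)

calls = {}  # index -> accumulated id, name, and argument fragments
for chunk in stream:
    if not chunk.choices:
        continue
    delta = chunk.choices[0].delta
    for tc in delta.tool_calls or []:
        entry = calls.setdefault(tc.index, {"id": None, "name": "", "arguments": ""})
        if tc.id:
            entry["id"] = tc.id
        if tc.function and tc.function.name:
            entry["name"] = tc.function.name
        if tc.function and tc.function.arguments:
            entry["arguments"] += tc.function.arguments

# The model does not always emit valid JSON; validate before use.
for entry in calls.values():
    print(entry["name"], json.loads(entry["arguments"]))
```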
+ ChatCompletionMessageToolCalls: + type: array + items: + $ref: '#/components/schemas/ChatCompletionMessageToolCall' + description: 'The tool calls generated by the model, such as function calls.' + ChatCompletionModalities: + type: array + items: + enum: + - text + - audio + type: string + description: "Output types that you would like the model to generate for this request.\nMost models are capable of generating text, which is the default:\n\n`[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To\nrequest that this model generate both text and audio responses, you can\nuse:\n\n`[\"text\", \"audio\"]`\n" + nullable: true + ChatCompletionNamedToolChoice: + required: + - type + - function + type: object + properties: + type: + enum: + - function + type: string + description: 'The type of the tool. Currently, only `function` is supported.' + function: + required: + - name + type: object + properties: + name: + type: string + description: The name of the function to call. + description: Specifies a tool the model should use. Use to force the model to call a specific function. + ChatCompletionRequestAssistantMessage: + title: Assistant message + required: + - role + type: object + properties: + content: + oneOf: + - title: Text content + type: string + description: The contents of the assistant message. + - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestAssistantMessageContentPart' + description: 'An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.' + description: "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.\n" + nullable: true + x-oaiExpandable: true + refusal: + type: string + description: The refusal message by the assistant. + nullable: true + role: + enum: + - assistant + type: string + description: 'The role of the messages author, in this case `assistant`.' + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + audio: + required: + - id + type: object + properties: + id: + type: string + description: "Unique identifier for a previous audio response from the model.\n" + description: "Data about a previous audio response from the model. \n[Learn more](/docs/guides/audio).\n" + nullable: true + x-oaiExpandable: true + tool_calls: + $ref: '#/components/schemas/ChatCompletionMessageToolCalls' + function_call: + required: + - arguments + - name + type: object + properties: + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + name: + type: string + description: The name of the function to call. + description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' 
+ nullable: true + deprecated: true + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + refusal: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' + x-oaiExpandable: true + ChatCompletionRequestFunctionMessage: + title: Function message + required: + - role + - content + - name + type: object + properties: + role: + enum: + - function + type: string + description: 'The role of the messages author, in this case `function`.' + content: + type: string + description: The contents of the function message. + nullable: true + name: + type: string + description: The name of the function to call. + deprecated: true + ChatCompletionRequestMessage: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' + - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' + - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' + - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' + - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' + discriminator: + propertyName: role + mapping: + system: '#/components/schemas/ChatCompletionRequestSystemMessage' + user: '#/components/schemas/ChatCompletionRequestUserMessage' + assistant: '#/components/schemas/ChatCompletionRequestAssistantMessage' + tool: '#/components/schemas/ChatCompletionRequestToolMessage' + function: '#/components/schemas/ChatCompletionRequestFunctionMessage' + x-oaiExpandable: true + ChatCompletionRequestMessageContentPartAudio: + title: Audio content part + required: + - type + - input_audio + type: object + properties: + type: + enum: + - input_audio + type: string + description: The type of the content part. Always `input_audio`. + input_audio: + required: + - data + - format + type: object + properties: + data: + type: string + description: Base64 encoded audio data. + format: + enum: + - wav + - mp3 + type: string + description: "The format of the encoded audio data. Currently supports \"wav\" and \"mp3\".\n" + description: "Learn about [audio inputs](/docs/guides/audio).\n" + ChatCompletionRequestMessageContentPartImage: + title: Image content part + required: + - type + - image_url + type: object + properties: + type: + enum: + - image_url + type: string + description: The type of the content part. + image_url: + required: + - url + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).' + default: auto + description: "Learn about [image inputs](/docs/guides/vision).\n" + ChatCompletionRequestMessageContentPartRefusal: + title: Refusal content part + required: + - type + - refusal + type: object + properties: + type: + enum: + - refusal + type: string + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. 
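+    # Editor's note (illustrative sketch, not part of the schema): the content-part schemas in this
+    # block can be combined inside a user message (defined below), for example text plus an image.
+    # The URL is a placeholder, and `detail` defaults to `auto` when omitted.
+    #   {
+    #     "role": "user",
+    #     "content": [
+    #       { "type": "text", "text": "What is in this image?" },
+    #       { "type": "image_url", "image_url": { "url": "https://example.com/photo.png", "detail": "low" } }
+    #     ]
+    #   }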
+ ChatCompletionRequestMessageContentPartText: + title: Text content part + required: + - type + - text + type: object + properties: + type: + enum: + - text + type: string + description: The type of the content part. + text: + type: string + description: The text content. + description: "Learn about [text inputs](/docs/guides/text-generation).\n" + ChatCompletionRequestSystemMessage: + title: System message + required: + - content + - role + type: object + properties: + content: + oneOf: + - title: Text content + type: string + description: The contents of the system message. + - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestSystemMessageContentPart' + description: 'An array of content parts with a defined type. For system messages, only type `text` is supported.' + description: The contents of the system message. + role: + enum: + - system + type: string + description: 'The role of the messages author, in this case `system`.' + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + x-oaiExpandable: true + ChatCompletionRequestToolMessage: + title: Tool message + required: + - role + - content + - tool_call_id + type: object + properties: + role: + enum: + - tool + type: string + description: 'The role of the messages author, in this case `tool`.' + content: + oneOf: + - title: Text content + type: string + description: The contents of the tool message. + - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestToolMessageContentPart' + description: 'An array of content parts with a defined type. For tool messages, only type `text` is supported.' + description: The contents of the tool message. + tool_call_id: + type: string + description: Tool call that this message is responding to. + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + x-oaiExpandable: true + ChatCompletionRequestUserMessage: + title: User message + required: + - content + - role + type: object + properties: + content: + oneOf: + - title: Text content + type: string + description: The text contents of the message. + - title: Array of content parts + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestUserMessageContentPart' + description: 'An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs.' + description: "The contents of the user message.\n" + x-oaiExpandable: true + role: + enum: + - user + type: string + description: 'The role of the messages author, in this case `user`.' + name: + type: string + description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
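+    # Editor's note (illustrative sketch, not part of the schema): a tool message returning the
+    # result of an earlier tool call could look like the following; `tool_call_id` must echo the id
+    # from the assistant's `tool_calls` entry (the id and payload below are placeholders).
+    #   { "role": "tool", "tool_call_id": "call_abc123", "content": "{\"temperature\": \"22C\"}" }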
+ ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' + - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartAudio' + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + image_url: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' + input_audio: '#/components/schemas/ChatCompletionRequestMessageContentPartAudio' + x-oaiExpandable: true + ChatCompletionResponseMessage: required: - - type - - message - - param - - code + - role + - content + - refusal type: object properties: - code: + content: type: string + description: The contents of the message. nullable: true - message: + refusal: type: string - param: + description: The refusal message generated by the model. + nullable: true + tool_calls: + $ref: '#/components/schemas/ChatCompletionMessageToolCalls' + role: + enum: + - assistant type: string + description: The role of the author of this message. + function_call: + required: + - name + - arguments + type: object + properties: + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + name: + type: string + description: The name of the function to call. + description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' + deprecated: true + audio: + required: + - id + - expires_at + - data + - transcript + type: object + properties: + id: + type: string + description: Unique identifier for this audio response. + expires_at: + type: integer + description: "The Unix timestamp (in seconds) for when this audio response will\nno longer be accessible on the server for use in multi-turn\nconversations.\n" + data: + type: string + description: "Base64 encoded audio bytes generated by the model, in the format\nspecified in the request.\n" + transcript: + type: string + description: Transcript of the audio generated by the model. + description: "If the audio output modality is requested, this object contains data\nabout the audio response from the model. [Learn more](/docs/guides/audio).\n" nullable: true - type: + x-oaiExpandable: true + description: A chat completion message generated by the model. + ChatCompletionRole: + enum: + - system + - user + - assistant + - tool + - function + type: string + description: The role of the author of a message + ChatCompletionStreamOptions: + type: object + properties: + include_usage: + type: boolean + description: "If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value.\n" + description: "Options for streaming response. Only set this when you set `stream: true`.\n" + default: + nullable: true + ChatCompletionStreamResponseDelta: + type: object + properties: + content: type: string - ErrorResponse: + description: The contents of the chunk message. 
+ nullable: true + function_call: + type: object + properties: + arguments: + type: string + description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' + name: + type: string + description: The name of the function to call. + description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' + deprecated: true + tool_calls: + type: array + items: + $ref: '#/components/schemas/ChatCompletionMessageToolCallChunk' + role: + enum: + - system + - user + - assistant + - tool + type: string + description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + description: A chat completion delta generated by streamed model responses. + ChatCompletionTokenLogprob: required: - - error + - token + - logprob + - bytes + - top_logprobs type: object properties: - error: - $ref: '#/components/schemas/Error' - ListModelsResponse: + token: + type: string + description: The token. + logprob: + type: number + description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' + bytes: + type: array + items: + type: integer + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + nullable: true + top_logprobs: + type: array + items: + required: + - token + - logprob + - bytes + type: object + properties: + token: + type: string + description: The token. + logprob: + type: number + description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' + bytes: + type: array + items: + type: integer + description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + nullable: true + description: 'List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.' + ChatCompletionTool: required: - - object - - data + - type + - function type: object properties: - object: + type: enum: - - list + - function type: string - data: + description: 'The type of the tool. Currently, only `function` is supported.' + function: + $ref: '#/components/schemas/FunctionObject' + ChatCompletionToolChoiceOption: + oneOf: + - enum: + - none + - auto + - required + type: string + description: "`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools.\n" + - $ref: '#/components/schemas/ChatCompletionNamedToolChoice' + description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present.\n" + x-oaiExpandable: true + ChunkingStrategyRequestParam: + type: object + oneOf: + - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' + - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' + description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' + discriminator: + propertyName: type + mapping: + auto: '#/components/schemas/AutoChunkingStrategyRequestParam' + static: '#/components/schemas/StaticChunkingStrategyRequestParam' + x-oaiExpandable: true + CompleteUploadRequest: + required: + - part_ids + type: object + properties: + part_ids: type: array items: - $ref: '#/components/schemas/Model' - DeleteModelResponse: + type: string + description: "The ordered list of Part IDs.\n" + md5: + type: string + description: "The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect.\n" + additionalProperties: false + CompletionUsage: + required: + - prompt_tokens + - completion_tokens + - total_tokens + type: object + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + type: object + properties: + accepted_prediction_tokens: + type: integer + description: "When using Predicted Outputs, the number of tokens in the\nprediction that appeared in the completion.\n" + audio_tokens: + type: integer + description: Audio input tokens generated by the model. + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. + rejected_prediction_tokens: + type: integer + description: "When using Predicted Outputs, the number of tokens in the\nprediction that did not appear in the completion. However, like\nreasoning tokens, these tokens are still counted in the total\ncompletion tokens for purposes of billing, output, and context window\nlimits.\n" + description: Breakdown of tokens used in a completion. + prompt_tokens_details: + type: object + properties: + audio_tokens: + type: integer + description: Audio input tokens present in the prompt. + cached_tokens: + type: integer + description: Cached tokens present in the prompt. + description: Breakdown of tokens used in the prompt. + description: Usage statistics for the completion request. + CostsResult: required: - - id - object - - deleted + - sessions type: object properties: - id: - type: string - deleted: - type: boolean object: + enum: + - organization.costs.result type: string - CreateCompletionRequest: + amount: + type: object + properties: + value: + type: number + description: The numeric value of the cost. 
+ currency: + type: string + description: Lowercase ISO-4217 currency e.g. "usd" + description: The monetary value in its associated currency. + line_item: + type: string + description: 'When `group_by=line_item`, this field provides the line item of the grouped costs result.' + project_id: + type: string + description: 'When `group_by=project_id`, this field provides the project ID of the grouped costs result.' + description: The aggregated costs details of the specific time bucket. + x-oaiMeta: + name: Costs object + example: "{\n \"object\": \"organization.costs.result\",\n \"amount\": {\n \"value\": 0.06,\n \"currency\": \"usd\"\n },\n \"line_item\": \"Image models\",\n \"project_id\": \"proj_abc\"\n}\n" + CreateAssistantRequest: + required: - model type: object properties: model: anyOf: - type: string - enum: - - gpt-3.5-turbo-instruct - - davinci-002 - - babbage-002 + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" + example: gpt-4o x-oaiTypeLabel: string - prompt: - oneOf: - - type: string - default: '' - example: This is a test. - - type: array - items: - type: string - default: '' - example: This is a test. - - minItems: 1 - type: array - items: - type: integer - example: '[1212, 318, 257, 1332, 13]' - - minItems: 1 - type: array - items: - minItems: 1 - type: array - items: - type: integer - example: '[[1212, 318, 257, 1332, 13]]' - description: "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n" - default: <|endoftext|> + name: + maxLength: 256 + type: string + description: "The name of the assistant. The maximum length is 256 characters.\n" nullable: true - best_of: - maximum: 20 - minimum: 0 - type: integer - description: "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" - default: 1 + description: + maxLength: 512 + type: string + description: "The description of the assistant. 
The maximum length is 512 characters.\n" nullable: true - echo: - type: boolean - description: "Echo back the prompt in addition to the completion\n" - default: false + instructions: + maxLength: 256000 + type: string + description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" nullable: true - frequency_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 + tools: + maxItems: 128 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/AssistantToolsCode' + file_search: '#/components/schemas/AssistantToolsFileSearch' + function: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + oneOf: + - required: + - vector_store_ids + - required: + - vector_stores + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + vector_stores: + maxItems: 1 + type: array + items: + type: object + properties: + file_ids: + maxItems: 10000 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" + chunking_strategy: + type: object + oneOf: + - title: Auto Chunking Strategy + required: + - type + type: object + properties: + type: + enum: + - auto + type: string + description: Always `auto`. + additionalProperties: false + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + - title: Static Chunking Strategy + required: + - type + - static + type: object + properties: + type: + enum: + - static + type: string + description: Always `static`. + static: + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + type: object + properties: + max_chunk_size_tokens: + maximum: 4096 + minimum: 100 + type: integer + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: "The number of tokens that overlap between chunks. 
The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" + additionalProperties: false + additionalProperties: false + description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' + discriminator: + propertyName: type + mapping: + auto: '#/components/schemas/CreateAssistantRequest/properties/tool_resources/properties/file_search/properties/vector_stores/items/properties/chunking_strategy/oneOf/0' + static: '#/components/schemas/CreateAssistantRequest/properties/tool_resources/properties/file_search/properties/vector_stores/items/properties/chunking_strategy/oneOf/1' + x-oaiExpandable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + x-oaiTypeLabel: map + description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" nullable: true - logit_bias: + metadata: type: object - additionalProperties: - type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n" - default: '' + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true x-oaiTypeLabel: map - logprobs: - maximum: 5 - minimum: 0 - type: integer - description: "Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n" - default: '' - nullable: true - max_tokens: - minimum: 0 - type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - default: 16 - nullable: true - example: 16 - n: - maximum: 128 - minimum: 1 - type: integer - description: "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" - default: 1 - nullable: true - example: 1 - presence_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - seed: - maximum: 9223372036854775807 - minimum: -9223372036854775808 - type: integer - description: "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" - nullable: true - stop: - oneOf: - - type: string - default: <|endoftext|> - nullable: true - example: "\n" - - maxItems: 4 - minItems: 1 - type: array - items: - type: string - example: '["\n"]' - description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.\n" - default: '' - nullable: true - stream: - type: boolean - description: "Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" - default: false - nullable: true - stream_options: - $ref: '#/components/schemas/ChatCompletionStreamOptions' - suffix: - type: string - description: "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n" - default: '' - nullable: true - example: test. temperature: maximum: 2 minimum: 0 type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" default: 1 nullable: true example: 1 @@ -3735,731 +6539,601 @@ components: maximum: 1 minimum: 0 type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" - default: 1 - nullable: true - example: 1 - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - CreateCompletionResponse: + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" + default: 1 + nullable: true + example: 1 + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + additionalProperties: false + CreateChatCompletionFunctionResponse: required: - - id - - object + - choices - created + - id - model - - choices + - object type: object properties: id: type: string - description: A unique identifier for the completion. + description: A unique identifier for the chat completion. choices: type: array items: required: - finish_reason - index + - message - logprobs - - text type: object properties: finish_reason: enum: - stop - length + - function_call - content_filter type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\nor `content_filter` if content was omitted due to a flag from our content filters.\n" + description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function.\n" index: type: integer - logprobs: - type: object - properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: - type: array - items: - type: string - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: number - nullable: true - text: - type: string - description: The list of completion choices the model generated for the input prompt. + description: The index of the choice in the list of choices. + message: + $ref: '#/components/schemas/ChatCompletionResponseMessage' + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. created: type: integer - description: The Unix timestamp (in seconds) of when the completion was created. + description: The Unix timestamp (in seconds) of when the chat completion was created. model: type: string - description: The model used for completion. + description: The model used for the chat completion. 
system_fingerprint: type: string description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" object: enum: - - text_completion + - chat.completion type: string - description: 'The object type, which is always "text_completion"' + description: 'The object type, which is always `chat.completion`.' usage: $ref: '#/components/schemas/CompletionUsage' - description: "Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).\n" + description: 'Represents a chat completion response returned by model, based on the provided input.' x-oaiMeta: - name: The completion object - legacy: true - example: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"gpt-4-turbo\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" - ChatCompletionRequestMessageContentPartText: - title: Text content part - required: - - type - - text - type: object - properties: - type: - enum: - - text - type: string - description: The type of the content part. - text: - type: string - description: The text content. - ChatCompletionRequestMessageContentPartImage: - title: Image content part - required: - - type - - image_url - type: object - properties: - type: - enum: - - image_url - type: string - description: The type of the content part. - image_url: - required: - - url - type: object - properties: - url: - type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).' - default: auto - ChatCompletionRequestMessageContentPartRefusal: - title: Refusal content part - required: - - type - - refusal - type: object - properties: - type: - enum: - - refusal - type: string - description: The type of the content part. - refusal: - type: string - description: The refusal message generated by the model. 
- ChatCompletionRequestMessage: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - discriminator: - propertyName: role - mapping: - system: '#/components/schemas/ChatCompletionRequestSystemMessage' - user: '#/components/schemas/ChatCompletionRequestUserMessage' - assistant: '#/components/schemas/ChatCompletionRequestAssistantMessage' - tool: '#/components/schemas/ChatCompletionRequestToolMessage' - function: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - ChatCompletionRequestSystemMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - x-oaiExpandable: true - ChatCompletionRequestUserMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' - discriminator: - propertyName: type - mapping: - text: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - image_url: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' - x-oaiExpandable: true - ChatCompletionRequestAssistantMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' - discriminator: - propertyName: type - mapping: - text: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - refusal: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' - x-oaiExpandable: true - ChatCompletionRequestToolMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - x-oaiExpandable: true - ChatCompletionRequestSystemMessage: - title: System message - required: - - content - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The contents of the system message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestSystemMessageContentPart' - description: 'An array of content parts with a defined type. For system messages, only type `text` is supported.' - description: The contents of the system message. - role: - enum: - - system - type: string - description: 'The role of the messages author, in this case `system`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
- ChatCompletionRequestUserMessage: - title: User message - required: - - content - - role + name: The chat completion object + group: chat + example: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" + CreateChatCompletionImageResponse: type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The text contents of the message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestUserMessageContentPart' - description: 'An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model.' - description: "The contents of the user message.\n" - x-oaiExpandable: true - role: - enum: - - user - type: string - description: 'The role of the messages author, in this case `user`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - ChatCompletionRequestAssistantMessage: - title: Assistant message + description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n }\n}\n" + CreateChatCompletionRequest: required: - - role + - model + - messages type: object properties: - content: - oneOf: - - title: Text content + messages: + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionRequestMessage' + description: "A list of messages comprising the conversation so far. 
Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n" + model: + anyOf: + - type: string + - enum: + - o1-preview + - o1-preview-2024-09-12 + - o1-mini + - o1-mini-2024-09-12 + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-realtime-preview + - gpt-4o-realtime-preview-2024-10-01 + - gpt-4o-audio-preview + - gpt-4o-audio-preview-2024-10-01 + - chatgpt-4o-latest + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0301 + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 type: string - description: The contents of the assistant message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessageContentPart' - description: 'An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.' - description: "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.\n" + description: 'ID of the model to use. See the [model endpoint compatibility](/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API.' + example: gpt-4o + x-oaiTypeLabel: string + store: + type: boolean + description: "Whether or not to store the output of this chat completion request\nfor use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products.\n" + default: false nullable: true - refusal: - type: string - description: The refusal message by the assistant. + metadata: + type: object + additionalProperties: + type: string + description: "Developer-defined tags and values used for filtering completions\nin the [dashboard](https://platform.openai.com/chat-completions).\n" nullable: true - role: - enum: - - assistant - type: string - description: 'The role of the messages author, in this case `assistant`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCalls' - function_call: - required: - - arguments - - name + frequency_penalty: + maximum: 2 + minimum: -2 + type: number + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" + default: 0 + nullable: true + logit_bias: type: object - properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. 
The name and arguments of a function that should be called, as generated by the model.' + additionalProperties: + type: integer + description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n" + default: + nullable: true + x-oaiTypeLabel: map + logprobs: + type: boolean + description: 'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.' + default: false + nullable: true + top_logprobs: + maximum: 20 + minimum: 0 + type: integer + description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.' + nullable: true + max_tokens: + type: integer + description: "The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning).\n" nullable: true deprecated: true - FineTuneChatCompletionRequestAssistantMessage: - required: - - role - allOf: - - title: Assistant message + max_completion_tokens: + type: integer + description: "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n" + nullable: true + n: + maximum: 128 + minimum: 1 + type: integer + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + default: 1 + nullable: true + example: 1 + modalities: + $ref: '#/components/schemas/ChatCompletionModalities' + prediction: + oneOf: + - $ref: '#/components/schemas/PredictionContent' + description: "Configuration for a [Predicted Output](/docs/guides/predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. This is most common when you are\nregenerating a file with only minor changes to most of the content.\n" + nullable: true + x-oaiExpandable: true + audio: + required: + - voice + - format type: object properties: - weight: + voice: enum: - - 0 - - 1 - type: integer - description: Controls whether the assistant message is trained against (0 or 1) - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - ChatCompletionRequestToolMessage: - title: Tool message - required: - - role - - content - - tool_call_id - type: object - properties: - role: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + type: string + description: "The voice the model uses to respond. 
Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive).\n" + format: + enum: + - wav + - mp3 + - flac + - opus + - pcm16 + type: string + description: "Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,\n`opus`, or `pcm16`.\n" + description: "Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`. [Learn more](/docs/guides/audio).\n" + nullable: true + x-oaiExpandable: true + presence_penalty: + maximum: 2 + minimum: -2 + type: number + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" + default: 0 + nullable: true + response_format: + oneOf: + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJsonObject' + - $ref: '#/components/schemas/ResponseFormatJsonSchema' + description: "An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o mini](/docs/models#gpt-4o-mini), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/ResponseFormatText' + json_object: '#/components/schemas/ResponseFormatJsonObject' + json_schema: '#/components/schemas/ResponseFormatJsonSchema' + x-oaiExpandable: true + seed: + maximum: 9223372036854776000 + minimum: -9223372036854776000 + type: integer + description: "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" + nullable: true + x-oaiMeta: + beta: true + service_tier: enum: - - tool + - auto + - default type: string - description: 'The role of the messages author, in this case `tool`.' - content: + description: "Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.\n - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n" + default: auto + nullable: true + stop: oneOf: - - title: Text content - type: string - description: The contents of the tool message. - - title: Array of content parts + - type: string + nullable: true + - maxItems: 4 minItems: 1 type: array items: - $ref: '#/components/schemas/ChatCompletionRequestToolMessageContentPart' - description: 'An array of content parts with a defined type. For tool messages, only type `text` is supported.' - description: The contents of the tool message. - tool_call_id: - type: string - description: Tool call that this message is responding to. - ChatCompletionRequestFunctionMessage: - title: Function message - required: - - role - - content - - name - type: object - properties: - role: - enum: - - function - type: string - description: 'The role of the messages author, in this case `function`.' - content: - type: string - description: The contents of the function message. - nullable: true - name: - type: string - description: The name of the function to call. - deprecated: true - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." - ChatCompletionFunctions: - required: - - name - type: object - properties: - description: - type: string - description: 'A description of what the function does, used by the model to choose when and how to call the function.' - name: - type: string - description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - parameters: - $ref: '#/components/schemas/FunctionParameters' - deprecated: true - ChatCompletionFunctionCallOption: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n" - ChatCompletionTool: - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - $ref: '#/components/schemas/FunctionObject' - FunctionObject: - required: - - name - type: object - properties: - description: - type: string - description: 'A description of what the function does, used by the model to choose when and how to call the function.' - name: - type: string - description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' 
- parameters: - $ref: '#/components/schemas/FunctionParameters' - strict: + type: string + description: "Up to 4 sequences where the API will stop generating further tokens.\n" + default: + stream: type: boolean - description: 'Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling).' + description: "If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" default: false nullable: true - ResponseFormatText: - required: - - type - type: object - properties: - type: - enum: - - text + stream_options: + $ref: '#/components/schemas/ChatCompletionStreamOptions' + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" + default: 1 + nullable: true + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" + default: 1 + nullable: true + example: 1 + tools: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTool' + description: "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n" + tool_choice: + $ref: '#/components/schemas/ChatCompletionToolChoiceOption' + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + user: type: string - description: 'The type of response format being defined: `text`' - ResponseFormatJsonObject: + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" + example: user-1234 + function_call: + oneOf: + - enum: + - none + - auto + type: string + description: "`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.\n" + - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' + description: "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. 
`auto` is the default if functions are present.\n" + deprecated: true + x-oaiExpandable: true + functions: + maxItems: 128 + minItems: 1 + type: array + items: + $ref: '#/components/schemas/ChatCompletionFunctions' + description: "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n" + deprecated: true + CreateChatCompletionResponse: required: - - type + - choices + - created + - id + - model + - object type: object properties: - type: - enum: - - json_object + id: type: string - description: 'The type of response format being defined: `json_object`' - ResponseFormatJsonSchemaSchema: - type: object - description: 'The schema for the response format, described as a JSON Schema object.' - ResponseFormatJsonSchema: - required: - - type - - json_schema - type: object - properties: - type: + description: A unique identifier for the chat completion. + choices: + type: array + items: + required: + - finish_reason + - index + - message + - logprobs + type: object + properties: + finish_reason: + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + type: string + description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: '#/components/schemas/ChatCompletionResponseMessage' + logprobs: + required: + - content + - refusal + type: object + properties: + content: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + description: A list of message content tokens with log probability information. + nullable: true + refusal: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + description: A list of message refusal tokens with log probability information. + nullable: true + description: Log probability information for the choice. + nullable: true + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + service_tier: enum: - - json_schema + - scale + - default type: string - description: 'The type of response format being defined: `json_schema`' - json_schema: - required: - - type - - name - type: object - properties: - description: - type: string - description: 'A description of what the response format is for, used by the model to determine how to respond in the format.' - name: - type: string - description: 'The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - schema: - $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' - strict: - type: boolean - description: 'Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs).' 
- default: false - nullable: true - ChatCompletionToolChoiceOption: - oneOf: - - enum: - - none - - auto - - required + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + nullable: true + example: scale + system_fingerprint: type: string - description: "`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools.\n" - - $ref: '#/components/schemas/ChatCompletionNamedToolChoice' - description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present.\n" - x-oaiExpandable: true - ChatCompletionNamedToolChoice: - required: - - type - - function - type: object - properties: - type: + description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" + object: enum: - - function + - chat.completion type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: Specifies a tool the model should use. Use to force the model to call a specific function. - ParallelToolCalls: - type: boolean - description: 'Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.' - nullable: true - ChatCompletionMessageToolCalls: - type: array - items: - $ref: '#/components/schemas/ChatCompletionMessageToolCall' - description: 'The tool calls generated by the model, such as function calls.' - ChatCompletionMessageToolCall: + description: 'The object type, which is always `chat.completion`.' + usage: + $ref: '#/components/schemas/CompletionUsage' + description: 'Represents a chat completion response returned by model, based on the provided input.' + x-oaiMeta: + name: The chat completion object + group: chat + example: "{\n \"id\": \"chatcmpl-123456\",\n \"object\": \"chat.completion\",\n \"created\": 1728933352,\n \"model\": \"gpt-4o-2024-08-06\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hi there! 
How can I assist you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 19,\n \"completion_tokens\": 10,\n \"total_tokens\": 29,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_6b68a8204b\"\n}\n" + CreateChatCompletionStreamResponse: required: + - choices + - created - id + - model + - object type: object properties: id: type: string - description: The ID of the tool call. - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - required: - - name - - arguments - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - description: The function that the model called. - ChatCompletionMessageToolCallChunk: - required: - - index - type: object - properties: - index: + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + items: + required: + - delta + - finish_reason + - index + type: object + properties: + delta: + $ref: '#/components/schemas/ChatCompletionStreamResponseDelta' + logprobs: + required: + - content + - refusal + type: object + properties: + content: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + description: A list of message content tokens with log probability information. + nullable: true + refusal: + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + description: A list of message refusal tokens with log probability information. + nullable: true + description: Log probability information for the choice. + nullable: true + finish_reason: + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + type: string + description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + description: "A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`.\n" + created: type: integer - id: + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: type: string - description: The ID of the tool call. - type: + description: The model used to generate the completion. + service_tier: enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.'
- function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - ChatCompletionRole: - enum: - - system - - user - - assistant - - tool - - function - type: string - description: The role of the author of a message - ChatCompletionStreamOptions: - type: object - properties: - include_usage: - type: boolean - description: "If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value.\n" - description: "Options for streaming response. Only set this when you set `stream: true`.\n" - default: '' - nullable: true - ChatCompletionResponseMessage: - required: - - role - - content - - refusal - type: object - properties: - content: + - scale + - default type: string - description: The contents of the message. + description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. nullable: true - refusal: + example: scale + system_fingerprint: type: string - description: The refusal message generated by the model. - nullable: true - tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCalls' - role: + description: "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" + object: enum: - - assistant + - chat.completion.chunk type: string - description: The role of the author of this message. - function_call: + description: 'The object type, which is always `chat.completion.chunk`.' + usage: required: - - name - - arguments - type: object - properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - deprecated: true - description: A chat completion message generated by the model. - ChatCompletionStreamResponseDelta: - type: object - properties: - content: - type: string - description: The contents of the chunk message. - nullable: true - function_call: + - prompt_tokens + - completion_tokens + - total_tokens type: object properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. 
- description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - deprecated: true - tool_calls: - type: array - items: - $ref: '#/components/schemas/ChatCompletionMessageToolCallChunk' - role: - enum: - - system - - user - - assistant - - tool - type: string - description: The role of the author of this message. - refusal: - type: string - description: The refusal message generated by the model. + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + description: "An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request.\nWhen present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n" nullable: true - description: A chat completion delta generated by streamed model responses. - CreateChatCompletionRequest: + description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" + CreateCompletionRequest: required: - model - - messages + - prompt type: object properties: - messages: - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestMessage' - description: 'A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).' model: anyOf: - type: string - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0301 - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 + - gpt-3.5-turbo-instruct + - davinci-002 + - babbage-002 type: string - description: 'ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.' - example: gpt-4o + description: "ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" x-oaiTypeLabel: string + prompt: + oneOf: + - type: string + default: '' + example: This is a test. + - type: array + items: + type: string + default: '' + example: This is a test. + - minItems: 1 + type: array + items: + type: integer + example: '[1212, 318, 257, 1332, 13]' + - minItems: 1 + type: array + items: + minItems: 1 + type: array + items: + type: integer + example: '[[1212, 318, 257, 1332, 13]]' + description: "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n" + default: <|endoftext|> + nullable: true + best_of: + maximum: 20 + minimum: 0 + type: integer + description: "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" + default: 1 + nullable: true + echo: + type: boolean + description: "Echo back the prompt in addition to the completion\n" + default: false + nullable: true frequency_penalty: maximum: 2 minimum: -2 type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" default: 0 nullable: true logit_bias: type: object additionalProperties: type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n" - default: '' + description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n" + default: nullable: true x-oaiTypeLabel: map logprobs: - type: boolean - description: 'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.' - default: false - nullable: true - top_logprobs: - maximum: 20 + maximum: 5 minimum: 0 type: integer - description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.' + description: "Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n" + default: nullable: true max_tokens: + minimum: 0 type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" + description: "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" + default: 16 nullable: true + example: 16 n: maximum: 128 minimum: 1 type: integer - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + description: "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" default: 1 nullable: true example: 1 @@ -4467,56 +7141,43 @@ components: maximum: 2 minimum: -2 type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" + description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n" default: 0 nullable: true - response_format: - oneOf: - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "An object specifying the format that the model must output.
Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" - discriminator: - propertyName: type - mapping: - text: '#/components/schemas/ResponseFormatText' - json_object: '#/components/schemas/ResponseFormatJsonObject' - json_schema: '#/components/schemas/ResponseFormatJsonSchema' - x-oaiExpandable: true - seed: - maximum: 9223372036854775807 - minimum: -9223372036854775808 - type: integer - description: "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" - nullable: true - x-oaiMeta: - beta: true - service_tier: - enum: - - auto - - default - type: string - description: "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', the system will utilize scale tier credits until they are exhausted.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n" - default: '' - nullable: true + seed: + maximum: 9223372036854776000 + minimum: -9223372036854776000 + type: integer + description: "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" + nullable: true stop: oneOf: - type: string + default: <|endoftext|> nullable: true + example: "\n" - maxItems: 4 minItems: 1 type: array items: type: string - description: "Up to 4 sequences where the API will stop generating further tokens.\n" - default: '' + example: '["\n"]' + description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.\n" + default: + nullable: true stream: type: boolean - description: "If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" + description: "Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" default: false nullable: true stream_options: $ref: '#/components/schemas/ChatCompletionStreamOptions' + suffix: + type: string + description: "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n" + default: + nullable: true + example: test. temperature: maximum: 2 minimum: 0 @@ -4533,347 +7194,387 @@ components: default: 1 nullable: true example: 1 - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n" - tool_choice: - $ref: '#/components/schemas/ChatCompletionToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 - function_call: - oneOf: - - enum: - - none - - auto - type: string - description: "`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.\n" - - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' - description: "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. `auto` is the default if functions are present.\n" - deprecated: true - x-oaiExpandable: true - functions: - maxItems: 128 - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n" - deprecated: true - CreateChatCompletionResponse: + CreateCompletionResponse: required: - - choices - - created - id - - model - object + - created + - model + - choices type: object properties: id: type: string - description: A unique identifier for the chat completion. + description: A unique identifier for the completion. 
choices: type: array items: required: - finish_reason - index - - message + - logprobs + - text type: object properties: finish_reason: enum: - stop - length - - tool_calls - content_filter - - function_call type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" + description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\nor `content_filter` if content was omitted due to a flag from our content filters.\n" index: type: integer - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' logprobs: - required: - - content - - refusal type: object properties: - content: + text_offset: type: array items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message content tokens with log probability information. - nullable: true - refusal: + type: integer + token_logprobs: type: array items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message refusal tokens with log probability information. - nullable: true - description: Log probability information for the choice. + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number nullable: true - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + text: + type: string + description: The list of completion choices the model generated for the input prompt. created: type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. + description: The Unix timestamp (in seconds) of when the completion was created. model: type: string - description: The model used for the chat completion. - service_tier: - enum: - - scale - - default - type: string - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - nullable: true - example: scale + description: The model used for completion. system_fingerprint: type: string description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" object: enum: - - chat.completion + - text_completion type: string - description: 'The object type, which is always `chat.completion`.' + description: 'The object type, which is always "text_completion"' usage: $ref: '#/components/schemas/CompletionUsage' - description: 'Represents a chat completion response returned by model, based on the provided input.' + description: "Represents a completion response from the API. 
Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).\n" x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - CreateChatCompletionFunctionResponse: + name: The completion object + legacy: true + example: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"gpt-4-turbo\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" + CreateEmbeddingRequest: required: - - choices - - created - - id - model - - object + - input type: object properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - items: - required: - - finish_reason - - index - - message - - logprobs - type: object - properties: - finish_reason: - enum: - - stop - - length - - function_call - - content_filter + input: + oneOf: + - title: string + type: string + description: The string that will be turned into an embedding. + default: '' + example: This is a test. + - title: array + maxItems: 2048 + minItems: 1 + type: array + items: type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function.\n" - index: + default: '' + example: '[''This is a test.'']' + description: The array of strings that will be turned into an embedding. + - title: array + maxItems: 2048 + minItems: 1 + type: array + items: type: integer - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - chat.completion - type: string - description: 'The object type, which is always `chat.completion`.' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: 'Represents a chat completion response returned by model, based on the provided input.' 
- x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99\n }\n}\n" - ChatCompletionTokenLogprob: - required: - - token - - logprob - - bytes - - top_logprobs - type: object - properties: - token: - type: string - description: The token. - logprob: - type: number - description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' - bytes: - type: array - items: - type: integer - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - nullable: true - top_logprobs: - type: array - items: - required: - - token - - logprob - - bytes - type: object - properties: - token: - type: string - description: The token. - logprob: - type: number - description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' - bytes: + description: The array of integers that will be turned into an embedding. + example: '[1212, 318, 257, 1332, 13]' + - title: array + maxItems: 2048 + minItems: 1 + type: array + items: + minItems: 1 type: array items: type: integer - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - nullable: true - description: 'List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.' - ListPaginatedFineTuningJobsResponse: + description: The array of arrays containing integers that will be turned into an embedding. + example: '[[1212, 318, 257, 1332, 13]]' + description: "Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" + example: The quick brown fox jumped over the lazy dog + x-oaiExpandable: true + model: + anyOf: + - type: string + - enum: + - text-embedding-ada-002 + - text-embedding-3-small + - text-embedding-3-large + type: string + description: "ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" + example: text-embedding-3-small + x-oaiTypeLabel: string + encoding_format: + enum: + - float + - base64 + type: string + description: 'The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).' + default: float + example: float + dimensions: + minimum: 1 + type: integer + description: "The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.\n" + user: + type: string + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" + example: user-1234 + additionalProperties: false + CreateEmbeddingResponse: required: - object + - model - data - - has_more + - usage type: object properties: data: type: array items: - $ref: '#/components/schemas/FineTuningJob' - has_more: - type: boolean + $ref: '#/components/schemas/Embedding' + description: The list of embeddings generated by the model. + model: + type: string + description: The name of the model used to generate the embedding. object: enum: - list type: string - CreateChatCompletionStreamResponse: + description: 'The object type, which is always "list".' + usage: + required: + - prompt_tokens + - total_tokens + type: object + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + description: The usage information for the request. + CreateFileRequest: + required: + - file + - purpose + type: object + properties: + file: + type: string + description: "The File object (not file name) to be uploaded.\n" + format: binary + purpose: + enum: + - assistants + - batch + - fine-tune + - vision + type: string + description: "The intended purpose of the uploaded file.\n\nUse \"assistants\" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, \"vision\" for Assistants image file inputs, \"batch\" for [Batch API](/docs/guides/batch), and \"fine-tune\" for [Fine-tuning](/docs/api-reference/fine-tuning).\n" + additionalProperties: false + CreateFineTuningJobRequest: required: - - choices - - created - - id - model - - object + - training_file type: object properties: - id: + model: + anyOf: + - type: string + - enum: + - babbage-002 + - davinci-002 + - gpt-3.5-turbo + - gpt-4o-mini + type: string + description: "The name of the model to fine-tune. You can select one of the\n[supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).\n" + example: gpt-4o-mini + x-oaiTypeLabel: string + training_file: type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: + description: "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. 
Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" + example: file-abc123 + hyperparameters: + type: object + properties: + batch_size: + oneOf: + - enum: + - auto + type: string + - maximum: 256 + minimum: 1 + type: integer + description: "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.\n" + default: auto + learning_rate_multiplier: + oneOf: + - enum: + - auto + type: string + - minimum: 0 + exclusiveMinimum: true + type: number + description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.\n" + default: auto + n_epochs: + oneOf: + - enum: + - auto + type: string + - maximum: 50 + minimum: 1 + type: integer + description: "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.\n" + default: auto + description: The hyperparameters used for the fine-tuning job. + suffix: + maxLength: 64 + minLength: 1 + type: string + description: "A string of up to 64 characters that will be added to your fine-tuned model name.\n\nFor example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.\n" + default: + nullable: true + validation_file: + type: string + description: "The ID of an uploaded file that contains validation data.\n\nIf you provide this file, the data is used to generate validation\nmetrics periodically during fine-tuning. These metrics can be viewed in\nthe fine-tuning results file.\nThe same data should not be present in both train and validation files.\n\nYour dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" + nullable: true + example: file-abc123 + integrations: type: array items: required: - - delta - - finish_reason - - index + - type + - wandb type: object properties: - delta: - $ref: '#/components/schemas/ChatCompletionStreamResponseDelta' - logprobs: + type: + oneOf: + - enum: + - wandb + type: string + description: "The type of integration to enable. Currently, only \"wandb\" (Weights and Biases) is supported.\n" + wandb: required: - - content - - refusal + - project type: object properties: - content: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message content tokens with log probability information. + project: + type: string + description: "The name of the project that the new run will be created under.\n" + example: my-wandb-project + name: + type: string + description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" nullable: true - refusal: + entity: + type: string + description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" + nullable: true + tags: type: array items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message refusal tokens with log probability information. 
- nullable: true - description: Log probability information for the choice. - nullable: true - finish_reason: - enum: - - stop - - length - - tool_calls - - content_filter - - function_call - type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" - nullable: true - index: - type: integer - description: The index of the choice in the list of choices. - description: "A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`.\n" - created: + type: string + example: custom-tag + description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" + description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" + description: A list of integrations to enable for your fine-tuning job. + nullable: true + seed: + maximum: 2147483647 + minimum: 0 type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - model: + description: "The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\nIf a seed is not specified, one will be generated for you.\n" + nullable: true + example: 42 + CreateImageEditRequest: + required: + - prompt + - image + type: object + properties: + image: type: string - description: The model to generate the completion. - service_tier: - enum: - - scale - - default + description: 'The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.' + format: binary + prompt: type: string - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. + description: A text description of the desired image(s). The maximum length is 1000 characters. + example: A cute baby sea otter wearing a beret + mask: + type: string + description: 'An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.' + format: binary + model: + anyOf: + - type: string + - enum: + - dall-e-2 + type: string + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + default: dall-e-2 nullable: true - example: scale - system_fingerprint: + example: dall-e-2 + x-oaiTypeLabel: string + n: + maximum: 10 + minimum: 1 + type: integer + description: The number of images to generate. Must be between 1 and 10. 
+ default: 1 + nullable: true + example: 1 + size: + enum: + - 256x256 + - 512x512 + - 1024x1024 type: string - description: "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: + description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.' + default: 1024x1024 + nullable: true + example: 1024x1024 + response_format: enum: - - chat.completion.chunk + - url + - b64_json + type: string + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + default: url + nullable: true + example: url + user: type: string - description: 'The object type, which is always `chat.completion.chunk`.' - usage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - description: "An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request.\nWhen present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n" - description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - CreateChatCompletionImageResponse: - type: object - description: 'Represents a streamed chunk of a chat completion response returned by model, based on the provided input.' - x-oaiMeta: - name: The chat completion chunk object - group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" + example: user-1234 CreateImageRequest: required: - prompt @@ -4943,94 +7644,7 @@ components: example: vivid user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - ImagesResponse: - required: - - created - - data - properties: - created: - type: integer - data: - type: array - items: - $ref: '#/components/schemas/Image' - Image: - type: object - properties: - b64_json: - type: string - description: 'The base64-encoded JSON of the generated image, if `response_format` is `b64_json`.' - url: - type: string - description: 'The URL of the generated image, if `response_format` is `url` (default).' - revised_prompt: - type: string - description: 'The prompt that was used to generate the image, if there was any revision to the prompt.' - description: Represents the url or the content of an image generated by the OpenAI API. - x-oaiMeta: - name: The image object - example: "{\n \"url\": \"...\",\n \"revised_prompt\": \"...\"\n}\n" - CreateImageEditRequest: - required: - - prompt - - image - type: object - properties: - image: - type: string - description: 'The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.' - format: binary - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters. - example: A cute baby sea otter wearing a beret - mask: - type: string - description: 'An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.' - format: binary - model: - anyOf: - - type: string - - enum: - - dall-e-2 - type: string - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - default: dall-e-2 - nullable: true - example: dall-e-2 - x-oaiTypeLabel: string - n: - maximum: 10 - minimum: 1 - type: integer - description: The number of images to generate. Must be between 1 and 10. - default: 1 - nullable: true - example: 1 - size: - enum: - - 256x256 - - 512x512 - - 1024x1024 - type: string - description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.' - default: 1024x1024 - nullable: true - example: 1024x1024 - response_format: - enum: - - url - - b64_json - type: string - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - default: url - nullable: true - example: url - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 CreateImageVariationRequest: required: @@ -5081,8 +7695,74 @@ components: example: 1024x1024 user: type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" + description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n" example: user-1234 + CreateMessageRequest: + required: + - role + - content + type: object + properties: + role: + enum: + - user + - assistant + type: string + description: "The role of the entity that is creating the message. Allowed values include:\n- `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n- `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.\n" + content: + oneOf: + - title: Text content + type: string + description: The text contents of the message. + - title: Array of content parts + minItems: 1 + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageContentImageFileObject' + - $ref: '#/components/schemas/MessageContentImageUrlObject' + - $ref: '#/components/schemas/MessageRequestContentTextObject' + discriminator: + propertyName: type + mapping: + image_file: '#/components/schemas/MessageContentImageFileObject' + image_url: '#/components/schemas/MessageContentImageUrlObject' + text: '#/components/schemas/MessageRequestContentTextObject' + x-oaiExpandable: true + description: 'An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models).' + x-oaiExpandable: true + attachments: + required: + - file_id + - tools + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/AssistantToolsCode' + file_search: '#/components/schemas/AssistantToolsFileSearchTypeOnly' + x-oaiExpandable: true + description: The tools to add this file to. + description: 'A list of files attached to the message, and the tools they should be added to.' + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + additionalProperties: false CreateModerationRequest: required: - input @@ -5091,6 +7771,7 @@ components: input: oneOf: - type: string + description: A string of text to classify for moderation. default: '' example: I want to kill them. - type: array @@ -5098,17 +7779,68 @@ components: type: string default: '' example: I want to kill them. - description: The input text to classify + description: An array of strings to classify for moderation. + - type: array + items: + oneOf: + - required: + - type + - image_url + type: object + properties: + type: + enum: + - image_url + type: string + description: Always `image_url`. + image_url: + required: + - url + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. 
+ format: uri + example: https://example.com/image.jpg + description: Contains either an image URL or a data URL for a base64 encoded image. + description: An object describing an image to classify. + - required: + - type + - text + type: object + properties: + type: + enum: + - text + type: string + description: Always `text`. + text: + type: string + description: A string of text to classify. + example: I want to kill them + description: An object describing text to classify. + discriminator: + propertyName: type + mapping: + image_url: '#/components/schemas/CreateModerationRequest/properties/input/oneOf/items/oneOf/0' + text: '#/components/schemas/CreateModerationRequest/properties/input/oneOf/items/oneOf/1' + x-oaiExpandable: true + description: An array of multi-modal inputs to the moderation model. + description: "Input (or inputs) to classify. Can be a single string, an array of strings, or\nan array of multi-modal input objects similar to other models.\n" + x-oaiExpandable: true model: anyOf: - type: string - enum: + - omni-moderation-latest + - omni-moderation-2024-09-26 - text-moderation-latest - text-moderation-stable type: string - description: "Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.\n\nThe default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.\n" - default: text-moderation-latest - example: text-moderation-stable + description: "The content moderation model you would like to use. Learn more in\n[the moderation guide](/docs/guides/moderation), and learn about\navailable models [here](/docs/models#moderation).\n" + default: omni-moderation-latest + example: omni-moderation-2024-09-26 x-oaiTypeLabel: string CreateModerationResponse: required: @@ -5130,6 +7862,7 @@ components: - flagged - categories - category_scores + - category_applied_input_types type: object properties: flagged: @@ -5141,6 +7874,8 @@ components: - hate/threatening - harassment - harassment/threatening + - illicit + - illicit/violent - self-harm - self-harm/intent - self-harm/instructions @@ -5162,6 +7897,12 @@ components: harassment/threatening: type: boolean description: Harassment content that also includes violence or serious harm towards any target. + illicit: + type: boolean + description: 'Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category.' + illicit/violent: + type: boolean + description: 'Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon.' self-harm: type: boolean description: 'Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.' @@ -5190,6 +7931,8 @@ components: - hate/threatening - harassment - harassment/threatening + - illicit + - illicit/violent - self-harm - self-harm/intent - self-harm/instructions @@ -5211,393 +7954,557 @@ components: harassment/threatening: type: number description: The score for the category 'harassment/threatening'. 
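The multi-modal `input` array and the `omni-moderation-*` model IDs introduced above can be exercised with the official Python SDK. A minimal sketch, assuming `OPENAI_API_KEY` is set, a reachable image URL, and an SDK version recent enough to accept object inputs and expose `category_applied_input_types`:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

result = client.moderations.create(
    model="omni-moderation-latest",
    input=[
        {"type": "text", "text": "I want to kill them."},
        {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}},
    ],
)

moderation = result.results[0]
print(moderation.flagged)                       # True/False for the whole input
print(moderation.categories.harassment)         # per-category boolean
print(moderation.category_scores.harassment)    # per-category score
print(moderation.category_applied_input_types)  # which input types each score covers
```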
+ illicit: + type: number + description: The score for the category 'illicit'. + illicit/violent: + type: number + description: The score for the category 'illicit/violent'. self-harm: type: number description: The score for the category 'self-harm'. self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + description: A list of the categories along with their scores as predicted by model. + category_applied_input_types: + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - illicit + - illicit/violent + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + type: object + properties: + hate: + type: array + items: + enum: + - text + type: string + description: The applied input type(s) for the category 'hate'. + hate/threatening: + type: array + items: + enum: + - text + type: string + description: The applied input type(s) for the category 'hate/threatening'. + harassment: + type: array + items: + enum: + - text + type: string + description: The applied input type(s) for the category 'harassment'. + harassment/threatening: + type: array + items: + enum: + - text + type: string + description: The applied input type(s) for the category 'harassment/threatening'. + illicit: + type: array + items: + enum: + - text + type: string + description: The applied input type(s) for the category 'illicit'. + illicit/violent: + type: array + items: + enum: + - text + type: string + description: The applied input type(s) for the category 'illicit/violent'. + self-harm: + type: array + items: + enum: + - text + - image + type: string + description: The applied input type(s) for the category 'self-harm'. + self-harm/intent: + type: array + items: + enum: + - text + - image + type: string + description: The applied input type(s) for the category 'self-harm/intent'. self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. + type: array + items: + enum: + - text + - image + type: string + description: The applied input type(s) for the category 'self-harm/instructions'. sexual: - type: number - description: The score for the category 'sexual'. + type: array + items: + enum: + - text + - image + type: string + description: The applied input type(s) for the category 'sexual'. sexual/minors: - type: number - description: The score for the category 'sexual/minors'. + type: array + items: + enum: + - text + type: string + description: The applied input type(s) for the category 'sexual/minors'. violence: - type: number - description: The score for the category 'violence'. + type: array + items: + enum: + - text + - image + type: string + description: The applied input type(s) for the category 'violence'. violence/graphic: - type: number - description: The score for the category 'violence/graphic'. - description: A list of the categories along with their scores as predicted by model. 
+ type: array + items: + enum: + - text + - image + type: string + description: The applied input type(s) for the category 'violence/graphic'. + description: A list of the categories along with the input type(s) that the score applies to. description: A list of moderation objects. description: Represents if a given text input is potentially harmful. x-oaiMeta: name: The moderation object - example: "{\n \"id\": \"modr-XXXXX\",\n \"model\": \"text-moderation-005\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"sexual\": false,\n \"hate\": false,\n \"harassment\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"hate/threatening\": false,\n \"violence/graphic\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"harassment/threatening\": true,\n \"violence\": true,\n },\n \"category_scores\": {\n \"sexual\": 1.2282071e-06,\n \"hate\": 0.010696256,\n \"harassment\": 0.29842457,\n \"self-harm\": 1.5236925e-08,\n \"sexual/minors\": 5.7246268e-08,\n \"hate/threatening\": 0.0060676364,\n \"violence/graphic\": 4.435014e-06,\n \"self-harm/intent\": 8.098441e-10,\n \"self-harm/instructions\": 2.8498655e-11,\n \"harassment/threatening\": 0.63055265,\n \"violence\": 0.99011886,\n }\n }\n ]\n}\n" - ListFilesResponse: - required: - - object - - data - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - object: - enum: - - list - type: string - CreateFileRequest: - required: - - file - - purpose - type: object - properties: - file: - type: string - description: "The File object (not file name) to be uploaded.\n" - format: binary - purpose: - enum: - - assistants - - batch - - fine-tune - - vision - type: string - description: "The intended purpose of the uploaded file.\n\nUse \"assistants\" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, \"vision\" for Assistants image file inputs, \"batch\" for [Batch API](/docs/guides/batch), and \"fine-tune\" for [Fine-tuning](/docs/api-reference/fine-tuning).\n" - additionalProperties: false - DeleteFileResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - object: - enum: - - file - type: string - deleted: - type: boolean - CreateUploadRequest: - required: - - filename - - purpose - - bytes - - mime_type - type: object - properties: - filename: - type: string - description: "The name of the file to upload.\n" - purpose: - enum: - - assistants - - batch - - fine-tune - - vision - type: string - description: "The intended purpose of the uploaded file.\n\nSee the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).\n" - bytes: - type: integer - description: "The number of bytes in the file you are uploading.\n" - mime_type: - type: string - description: "The MIME type of the file.\n\nThis must fall within the supported MIME types for your file purpose. 
See the supported MIME types for assistants and vision.\n" - additionalProperties: false - AddUploadPartRequest: - required: - - data - type: object - properties: - data: - type: string - description: "The chunk of bytes for this Part.\n" - format: binary - additionalProperties: false - CompleteUploadRequest: + example: "{\n \"id\": \"modr-0d9740456c391e43c445bf0f010940c7\",\n \"model\": \"omni-moderation-latest\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"harassment\": true,\n \"harassment/threatening\": true,\n \"sexual\": false,\n \"hate\": false,\n \"hate/threatening\": false,\n \"illicit\": false,\n \"illicit/violent\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"violence\": true,\n \"violence/graphic\": true\n },\n \"category_scores\": {\n \"harassment\": 0.8189693396524255,\n \"harassment/threatening\": 0.804985420696006,\n \"sexual\": 1.573112165348997e-6,\n \"hate\": 0.007562942636942845,\n \"hate/threatening\": 0.004208854591835476,\n \"illicit\": 0.030535955153511665,\n \"illicit/violent\": 0.008925306722380033,\n \"self-harm/intent\": 0.00023023930975076432,\n \"self-harm/instructions\": 0.0002293869201073356,\n \"self-harm\": 0.012598046106750154,\n \"sexual/minors\": 2.212566909570261e-8,\n \"violence\": 0.9999992735124786,\n \"violence/graphic\": 0.843064871157054\n },\n \"category_applied_input_types\": {\n \"harassment\": [\n \"text\"\n ],\n \"harassment/threatening\": [\n \"text\"\n ],\n \"sexual\": [\n \"text\",\n \"image\"\n ],\n \"hate\": [\n \"text\"\n ],\n \"hate/threatening\": [\n \"text\"\n ],\n \"illicit\": [\n \"text\"\n ],\n \"illicit/violent\": [\n \"text\"\n ],\n \"self-harm/intent\": [\n \"text\",\n \"image\"\n ],\n \"self-harm/instructions\": [\n \"text\",\n \"image\"\n ],\n \"self-harm\": [\n \"text\",\n \"image\"\n ],\n \"sexual/minors\": [\n \"text\"\n ],\n \"violence\": [\n \"text\",\n \"image\"\n ],\n \"violence/graphic\": [\n \"text\",\n \"image\"\n ]\n }\n }\n ]\n}\n" + CreateRunRequest: required: - - part_ids + - assistant_id type: object properties: - part_ids: - type: array - items: - type: string - description: "The ordered list of Part IDs.\n" - md5: + assistant_id: type: string - description: "The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect.\n" - additionalProperties: false - CancelUploadRequest: - type: object - additionalProperties: false - CreateFineTuningJobRequest: - required: - - model - - training_file - type: object - properties: + description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' model: anyOf: - type: string - enum: - - babbage-002 - - davinci-002 - - gpt-3.5-turbo + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 type: string - description: "The name of the model to fine-tune. 
You can select one of the\n[supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).\n" - example: gpt-4o-mini + description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' + nullable: true + example: gpt-4o x-oaiTypeLabel: string - training_file: + instructions: type: string - description: "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" - example: file-abc123 - hyperparameters: - type: object - properties: - batch_size: - oneOf: - - enum: - - auto - type: string - - maximum: 256 - minimum: 1 - type: integer - description: "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.\n" - default: auto - learning_rate_multiplier: - oneOf: - - enum: - - auto - type: string - - minimum: 0 - exclusiveMinimum: true - type: number - description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.\n" - default: auto - n_epochs: - oneOf: - - enum: - - auto - type: string - - maximum: 50 - minimum: 1 - type: integer - description: "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.\n" - default: auto - description: The hyperparameters used for the fine-tuning job. - suffix: - maxLength: 40 - minLength: 1 + description: 'Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis.' + nullable: true + additional_instructions: type: string - description: "A string of up to 18 characters that will be added to your fine-tuned model name.\n\nFor example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.\n" - default: '' + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + nullable: true + additional_messages: + type: array + items: + $ref: '#/components/schemas/CreateMessageRequest' + description: Adds additional messages to the thread before creating the run. + nullable: true + tools: + maxItems: 20 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/AssistantToolsCode' + file_search: '#/components/schemas/AssistantToolsFileSearch' + function: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
+ nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" + default: 1 + nullable: true + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" + default: 1 nullable: true - validation_file: - type: string - description: "The ID of an uploaded file that contains validation data.\n\nIf you provide this file, the data is used to generate validation\nmetrics periodically during fine-tuning. These metrics can be viewed in\nthe fine-tuning results file.\nThe same data should not be present in both train and validation files.\n\nYour dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" + example: 1 + stream: + type: boolean + description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" nullable: true - example: file-abc123 - integrations: - type: array - items: - required: - - type - - wandb - type: object - properties: - type: - oneOf: - - enum: - - wandb - type: string - description: "The type of integration to enable. Currently, only \"wandb\" (Weights and Biases) is supported.\n" - wandb: - required: - - project - type: object - properties: - project: - type: string - description: "The name of the project that the new run will be created under.\n" - example: my-wandb-project - name: - type: string - description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" - nullable: true - entity: - type: string - description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" - nullable: true - tags: - type: array - items: - type: string - example: custom-tag - description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" - description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" - description: A list of integrations to enable for your fine-tuning job. 
+ max_prompt_tokens: + minimum: 256 + type: integer + description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" nullable: true - seed: - maximum: 2147483647 - minimum: 0 + max_completion_tokens: + minimum: 256 type: integer - description: "The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\nIf a seed is not specified, one will be generated for you.\n" + description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" nullable: true - example: 42 - ListFineTuningJobEventsResponse: + truncation_strategy: + $ref: '#/components/schemas/TruncationObject' + tool_choice: + $ref: '#/components/schemas/AssistantsApiToolChoiceOption' + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + additionalProperties: false + CreateSpeechRequest: required: - - object - - data + - model + - input + - voice type: object properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobEvent' - object: - enum: - - list + model: + anyOf: + - type: string + - enum: + - tts-1 + - tts-1-hd + type: string + description: "One of the available [TTS models](/docs/models#tts): `tts-1` or `tts-1-hd`\n" + x-oaiTypeLabel: string + input: + maxLength: 4096 type: string - ListFineTuningJobCheckpointsResponse: - required: - - object - - data - - has_more - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobCheckpoint' - object: + description: The text to generate audio for. The maximum length is 4096 characters. + voice: enum: - - list - type: string - first_id: + - alloy + - echo + - fable + - onyx + - nova + - shimmer type: string - nullable: true - last_id: + description: 'The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options).' + response_format: + enum: + - mp3 + - opus + - aac + - flac + - wav + - pcm type: string - nullable: true - has_more: - type: boolean - CreateEmbeddingRequest: + description: 'The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.' + default: mp3 + speed: + maximum: 4 + minimum: 0.25 + type: number + description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. + default: 1 + additionalProperties: false + CreateThreadAndRunRequest: required: - - model - - input + - assistant_id type: object properties: - input: - oneOf: - - title: string - type: string - description: The string that will be turned into an embedding. - default: '' - example: This is a test. 
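As a rough illustration of how `CreateRunRequest` maps onto the official Python SDK: the sketch below assumes an existing thread and assistant (the IDs are placeholders) and only uses parameters defined in the schema above.

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.create(
    "thread_abc123",                 # placeholder thread ID
    assistant_id="asst_abc123",      # placeholder assistant ID (required)
    model="gpt-4o",                  # optional override of the assistant's model
    additional_instructions="Keep answers to one sentence.",
    additional_messages=[{"role": "user", "content": "Hello!"}],
    temperature=1,
    max_completion_tokens=512,
)

print(run.id, run.status)
```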
- - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - type: string - default: '' - example: '[''This is a test.'']' - description: The array of strings that will be turned into an embedding. - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - type: integer - description: The array of integers that will be turned into an embedding. - example: '[1212, 318, 257, 1332, 13]' - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - minItems: 1 - type: array - items: - type: integer - description: The array of arrays containing integers that will be turned into an embedding. - example: '[[1212, 318, 257, 1332, 13]]' - description: "Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - example: The quick brown fox jumped over the lazy dog - x-oaiExpandable: true + assistant_id: + type: string + description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' + thread: + $ref: '#/components/schemas/CreateThreadRequest' model: anyOf: - type: string - enum: - - text-embedding-ada-002 - - text-embedding-3-small - - text-embedding-3-large + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - example: text-embedding-3-small + description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' + nullable: true + example: gpt-4o x-oaiTypeLabel: string - encoding_format: - enum: - - float - - base64 + instructions: type: string - description: 'The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).' - default: float - example: float - dimensions: - minimum: 1 + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + nullable: true + tools: + maxItems: 20 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/AssistantToolsCode' + file_search: '#/components/schemas/AssistantToolsFileSearch' + function: '#/components/schemas/AssistantToolsFunction' + description: Override the tools the assistant can use for this run. 
This is useful for modifying the behavior on a per-run basis. + nullable: true + tool_resources: + type: object + properties: + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + temperature: + maximum: 2 + minimum: 0 + type: number + description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" + default: 1 + nullable: true + example: 1 + top_p: + maximum: 1 + minimum: 0 + type: number + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" + default: 1 + nullable: true + example: 1 + stream: + type: boolean + description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" + nullable: true + max_prompt_tokens: + minimum: 256 type: integer - description: "The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.\n" + description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" nullable: true - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 + max_completion_tokens: + minimum: 256 + type: integer + description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. 
See `incomplete_details` for more info.\n" + nullable: true + truncation_strategy: + $ref: '#/components/schemas/TruncationObject' + tool_choice: + $ref: '#/components/schemas/AssistantsApiToolChoiceOption' + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' additionalProperties: false - CreateEmbeddingResponse: - required: - - object - - model - - data - - usage + CreateThreadRequest: type: object properties: - data: + messages: type: array items: - $ref: '#/components/schemas/Embedding' - description: The list of embeddings generated by the model. - model: - type: string - description: The name of the model used to generate the embedding. - object: - enum: - - list - type: string - description: 'The object type, which is always "list".' - usage: - required: - - prompt_tokens - - total_tokens + $ref: '#/components/schemas/CreateMessageRequest' + description: 'A list of [messages](/docs/api-reference/messages) to start the thread with.' + tool_resources: type: object properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - description: The usage information for the request. + code_interpreter: + type: object + properties: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: + type: object + oneOf: + - required: + - vector_store_ids + - required: + - vector_stores + properties: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" + vector_stores: + maxItems: 1 + type: array + items: + type: object + properties: + file_ids: + maxItems: 10000 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" + chunking_strategy: + type: object + oneOf: + - title: Auto Chunking Strategy + required: + - type + type: object + properties: + type: + enum: + - auto + type: string + description: Always `auto`. + additionalProperties: false + description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + - title: Static Chunking Strategy + required: + - type + - static + type: object + properties: + type: + enum: + - static + type: string + description: Always `static`. + static: + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + type: object + properties: + max_chunk_size_tokens: + maximum: 4096 + minimum: 100 + type: integer + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" + additionalProperties: false + additionalProperties: false + description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' 
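A sketch of `CreateThreadAndRunRequest` combined with the inline `vector_stores` helper and a `static` chunking strategy described above, using the official Python SDK; the assistant and file IDs are placeholders.

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",  # placeholder assistant ID
    thread={
        "messages": [{"role": "user", "content": "Summarize the attached report."}],
        "tool_resources": {
            "file_search": {
                "vector_stores": [
                    {
                        "file_ids": ["file-abc123"],  # placeholder file ID
                        "chunking_strategy": {
                            "type": "static",
                            "static": {
                                "max_chunk_size_tokens": 800,  # defaults of the auto strategy
                                "chunk_overlap_tokens": 400,
                            },
                        },
                    }
                ]
            }
        },
    },
)

print(run.id, run.thread_id)
```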
+ discriminator: + propertyName: type + mapping: + auto: '#/components/schemas/CreateThreadRequest/properties/tool_resources/properties/file_search/properties/vector_stores/items/properties/chunking_strategy/oneOf/0' + static: '#/components/schemas/CreateThreadRequest/properties/tool_resources/properties/file_search/properties/vector_stores/items/properties/chunking_strategy/oneOf/1' + x-oaiExpandable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + x-oaiTypeLabel: map + x-oaiExpandable: true + description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread.\n" + description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + additionalProperties: false CreateTranscriptionRequest: required: - file @@ -5623,17 +8530,9 @@ components: description: "The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.\n" prompt: type: string - description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.\n" + description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language.\n" response_format: - enum: - - json - - text - - srt - - verbose_json - - vtt - type: string - description: "The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" - default: json + $ref: '#/components/schemas/AudioResponseFormat' temperature: type: number description: "The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.\n" @@ -5661,77 +8560,7 @@ components: x-oaiMeta: name: The transcription object (JSON) group: audio - example: "{\n \"text\": \"Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. 
This is a place where you can get to do that.\"\n}\n" - TranscriptionSegment: - required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob - type: object - properties: - id: - type: integer - description: Unique identifier of the segment. - seek: - type: integer - description: Seek offset of the segment. - start: - type: number - description: Start time of the segment in seconds. - format: float - end: - type: number - description: End time of the segment in seconds. - format: float - text: - type: string - description: Text content of the segment. - tokens: - type: array - items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - description: Temperature parameter used for generating the segment. - format: float - avg_logprob: - type: number - description: 'Average logprob of the segment. If the value is lower than -1, consider the logprobs failed.' - format: float - compression_ratio: - type: number - description: 'Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed.' - format: float - no_speech_prob: - type: number - description: 'Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent.' - format: float - TranscriptionWord: - required: - - word - - start - - end - type: object - properties: - word: - type: string - description: The text content of the word. - start: - type: number - description: Start time of the word in seconds. - format: float - end: - type: number - description: End time of the word in seconds. - format: float + example: "{\n \"text\": \"Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that.\"\n}\n" CreateTranscriptionResponseVerboseJson: required: - language @@ -5785,11 +8614,9 @@ components: x-oaiTypeLabel: string prompt: type: string - description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.\n" + description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English.\n" response_format: - type: string - description: "The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" - default: json + $ref: '#/components/schemas/AudioResponseFormat' temperature: type: number description: "The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.\n" @@ -5823,212 +8650,220 @@ components: items: $ref: '#/components/schemas/TranscriptionSegment' description: Segments of the translated text and their corresponding details. 
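For reference, a minimal transcription call against this schema with the official Python SDK; the audio file name is a placeholder and `whisper-1` is assumed as the model ID (it is not shown in this excerpt). The `getattr` guard keeps the sketch valid even on SDK versions that return only `text` for non-verbose formats.

```python
from openai import OpenAI

client = OpenAI()

with open("meeting.mp3", "rb") as audio_file:      # placeholder file
    transcript = client.audio.transcriptions.create(
        model="whisper-1",               # assumed model ID
        file=audio_file,
        language="en",                   # ISO-639-1 hint
        prompt="OpenAI, API, Whisper",   # optional style/vocabulary hint
        response_format="verbose_json",  # one of the AudioResponseFormat values
        temperature=0,
    )

print(transcript.text)
for segment in getattr(transcript, "segments", None) or []:
    print(segment.start, segment.end, segment.text)
```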
- CreateSpeechRequest: + CreateUploadRequest: required: - - model - - input - - voice + - filename + - purpose + - bytes + - mime_type type: object properties: - model: - anyOf: - - type: string - - enum: - - tts-1 - - tts-1-hd - type: string - description: "One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`\n" - x-oaiTypeLabel: string - input: - maxLength: 4096 + filename: type: string - description: The text to generate audio for. The maximum length is 4096 characters. - voice: + description: "The name of the file to upload.\n" + purpose: enum: - - alloy - - echo - - fable - - onyx - - nova - - shimmer + - assistants + - batch + - fine-tune + - vision type: string - description: 'The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options).' - response_format: - enum: - - mp3 - - opus - - aac - - flac - - wav - - pcm + description: "The intended purpose of the uploaded file.\n\nSee the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).\n" + bytes: + type: integer + description: "The number of bytes in the file you are uploading.\n" + mime_type: type: string - description: 'The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.' - default: mp3 - speed: - maximum: 4.0 - minimum: 0.25 - type: number - description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. - default: 1 + description: "The MIME type of the file.\n\nThis must fall within the supported MIME types for your file purpose. See the supported MIME types for assistants and vision.\n" additionalProperties: false - Model: - title: Model + CreateVectorStoreFileBatchRequest: + required: + - file_ids + type: object + properties: + file_ids: + maxItems: 500 + minItems: 1 + type: array + items: + type: string + description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' + chunking_strategy: + $ref: '#/components/schemas/ChunkingStrategyRequestParam' + additionalProperties: false + CreateVectorStoreFileRequest: + required: + - file_id + type: object + properties: + file_id: + type: string + description: 'A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.' + chunking_strategy: + $ref: '#/components/schemas/ChunkingStrategyRequestParam' + additionalProperties: false + CreateVectorStoreRequest: + type: object + properties: + file_ids: + maxItems: 500 + type: array + items: + type: string + description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' + name: + type: string + description: The name of the vector store. + expires_after: + $ref: '#/components/schemas/VectorStoreExpirationAfter' + chunking_strategy: + type: object + oneOf: + - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' + - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' + description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty.' 
+ discriminator: + propertyName: type + mapping: + auto: '#/components/schemas/AutoChunkingStrategyRequestParam' + static: '#/components/schemas/StaticChunkingStrategyRequestParam' + x-oaiExpandable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + additionalProperties: false + DefaultProjectErrorResponse: + required: + - code + - message + type: object + properties: + code: + type: integer + message: + type: string + DeleteAssistantResponse: required: - id - object - - created - - owned_by + - deleted + type: object properties: id: type: string - description: 'The model identifier, which can be referenced in the API endpoints.' - created: - type: integer - description: The Unix timestamp (in seconds) when the model was created. + deleted: + type: boolean object: enum: - - model - type: string - description: 'The object type, which is always "model".' - owned_by: + - assistant.deleted type: string - description: The organization that owns the model. - description: Describes an OpenAI model offering that can be used with the API. - x-oaiMeta: - name: The model object - example: "{\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" - OpenAIFile: - title: OpenAIFile + DeleteFileResponse: required: - id - object - - bytes - - created_at - - filename - - purpose - - status + - deleted + type: object properties: id: type: string - description: 'The file identifier, which can be referenced in the API endpoints.' - bytes: - type: integer - description: 'The size of the file, in bytes.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. object: enum: - file type: string - description: 'The object type, which is always `file`.' - purpose: - enum: - - assistants - - assistants_output - - batch - - batch_output - - fine-tune - - fine-tune-results - - vision + deleted: + type: boolean + DeleteMessageResponse: + required: + - id + - object + - deleted + type: object + properties: + id: type: string - description: 'The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`.' - status: + deleted: + type: boolean + object: enum: - - uploaded - - processed - - error - type: string - description: 'Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`.' - deprecated: true - status_details: + - thread.message.deleted type: string - description: 'Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.' - deprecated: true - description: The `File` object represents a document that has been uploaded to OpenAI. 
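A sketch of `CreateVectorStoreRequest` and `CreateVectorStoreFileRequest` with the official Python SDK; the `beta` namespace is assumed (matching SDK versions contemporary with this spec) and the file IDs are placeholders.

```python
from openai import OpenAI

client = OpenAI()

vector_store = client.beta.vector_stores.create(
    name="Support FAQ",
    file_ids=["file-abc123"],  # placeholder file ID
    chunking_strategy={
        "type": "static",
        "static": {"max_chunk_size_tokens": 800, "chunk_overlap_tokens": 400},
    },
)

# Attach one more file to the same store (CreateVectorStoreFileRequest).
vs_file = client.beta.vector_stores.files.create(
    vector_store_id=vector_store.id,
    file_id="file-def456",  # placeholder file ID
)

print(vector_store.id, vs_file.id)
```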
- x-oaiMeta: - name: The file object - example: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n}\n" - Upload: - title: Upload + DeleteModelResponse: required: - - bytes - - created_at - - expires_at - - filename - id - - purpose - - status - - step_number + - object + - deleted type: object properties: id: type: string - description: 'The Upload unique identifier, which can be referenced in API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - filename: + deleted: + type: boolean + object: type: string - description: The name of the file to be uploaded. - bytes: - type: integer - description: The intended number of bytes to be uploaded. - purpose: + DeleteThreadResponse: + required: + - id + - object + - deleted + type: object + properties: + id: type: string - description: 'The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values.' - status: + deleted: + type: boolean + object: enum: - - pending - - completed - - cancelled - - expired + - thread.deleted + type: string + DeleteVectorStoreFileResponse: + required: + - id + - object + - deleted + type: object + properties: + id: type: string - description: The status of the Upload. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. + deleted: + type: boolean object: enum: - - upload + - vector_store.file.deleted type: string - description: 'The object type, which is always "upload".' - file: - $ref: '#/components/schemas/OpenAIFile' - description: "The Upload object can accept byte chunks in the form of Parts.\n" - x-oaiMeta: - name: The upload object - example: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" - UploadPart: - title: UploadPart + DeleteVectorStoreResponse: required: - - created_at - id - object - - upload_id + - deleted type: object properties: id: type: string - description: 'The upload Part unique identifier, which can be referenced in API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Part was created. - upload_id: - type: string - description: The ID of the Upload object that this Part was added to. + deleted: + type: boolean object: enum: - - upload.part + - vector_store.deleted type: string - description: 'The object type, which is always `upload.part`.' - description: "The upload Part represents a chunk of bytes we can add to an Upload object.\n" + DoneEvent: + required: + - event + - data + type: object + properties: + event: + enum: + - done + type: string + data: + enum: + - '[DONE]' + type: string + description: Occurs when a stream ends. 
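The `stream: true` flag on run creation, together with the nearby `ErrorEvent` and `DoneEvent` definitions, corresponds to a server-sent event stream. A rough sketch with the official Python SDK (IDs are placeholders; exact event names depend on the SDK version):

```python
from openai import OpenAI

client = OpenAI()

stream = client.beta.threads.runs.create(
    "thread_abc123",             # placeholder thread ID
    assistant_id="asst_abc123",  # placeholder assistant ID
    stream=True,
)

for event in stream:
    # Each server-sent event has an `event` name and a typed `data` payload.
    print(event.event)
    if event.event == "error":
        print(event.data)  # an Error object, per ErrorEvent

# On the wire the stream terminates with `event: done` / `data: [DONE]`;
# the SDK iterator simply ends at that point.
```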
x-oaiMeta: - name: The upload part object - example: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719186911,\n \"upload_id\": \"upload_abc123\"\n}\n" + dataDescription: '`data` is `[DONE]`' Embedding: required: - index @@ -6053,6 +8888,114 @@ components: x-oaiMeta: name: The embedding object example: "{\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n}\n" + Error: + required: + - type + - message + - param + - code + type: object + properties: + code: + type: string + nullable: true + message: + type: string + param: + type: string + nullable: true + type: + type: string + ErrorEvent: + required: + - event + - data + type: object + properties: + event: + enum: + - error + type: string + data: + $ref: '#/components/schemas/Error' + description: 'Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout.' + x-oaiMeta: + dataDescription: '`data` is an [error](/docs/guides/error-codes#api-errors)' + ErrorResponse: + required: + - error + type: object + properties: + error: + $ref: '#/components/schemas/Error' + FileSearchRankingOptions: + title: File search tool call ranking options + required: + - score_threshold + type: object + properties: + ranker: + enum: + - auto + - default_2024_08_21 + type: string + description: The ranker to use for the file search. If not specified will use the `auto` ranker. + score_threshold: + maximum: 1 + minimum: 0 + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + description: "The ranking options for the file search. If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.\n\nSee the [file search tool documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.\n" + FineTuneChatCompletionRequestAssistantMessage: + required: + - role + allOf: + - title: Assistant message + type: object + properties: + weight: + enum: + - 0 + - 1 + type: integer + description: Controls whether the assistant message is trained against (0 or 1) + - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' + FineTuningIntegration: + title: Fine-Tuning Job Integration + required: + - type + - wandb + type: object + properties: + type: + enum: + - wandb + type: string + description: The type of the integration being enabled for the fine-tuning job + wandb: + required: + - project + type: object + properties: + project: + type: string + description: "The name of the project that the new run will be created under.\n" + example: my-wandb-project + name: + type: string + description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" + nullable: true + entity: + type: string + description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" + nullable: true + tags: + type: array + items: + type: string + example: custom-tag + description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. 
Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" + description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" FineTuningJob: title: FineTuningJob required: @@ -6124,127 +9067,62 @@ components: description: 'The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.' model: type: string - description: The base model that is being fine-tuned. - object: - enum: - - fine_tuning.job - type: string - description: 'The object type, which is always "fine_tuning.job".' - organization_id: - type: string - description: The organization that owns the fine-tuning job. - result_files: - type: array - items: - type: string - example: file-abc123 - description: 'The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents).' - status: - enum: - - validating_files - - queued - - running - - succeeded - - failed - - cancelled - type: string - description: 'The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.' - trained_tokens: - type: integer - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - nullable: true - training_file: - type: string - description: 'The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents).' - validation_file: - type: string - description: 'The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents).' - nullable: true - integrations: - maxItems: 5 - type: array - items: - oneOf: - - $ref: '#/components/schemas/FineTuningIntegration' - x-oaiExpandable: true - description: A list of integrations to enable for this fine-tuning job. - nullable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. 
- nullable: true - description: "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.\n" - x-oaiMeta: - name: The fine-tuning job object - example: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" - FineTuningIntegration: - title: Fine-Tuning Job Integration - required: - - type - - wandb - type: object - properties: - type: - enum: - - wandb - type: string - description: The type of the integration being enabled for the fine-tuning job - wandb: - required: - - project - type: object - properties: - project: - type: string - description: "The name of the project that the new run will be created under.\n" - example: my-wandb-project - name: - type: string - description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" - nullable: true - entity: - type: string - description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" - nullable: true - tags: - type: array - items: - type: string - example: custom-tag - description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" - description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" - FineTuningJobEvent: - required: - - id - - object - - created_at - - level - - message - type: object - properties: - id: - type: string - created_at: - type: integer - level: + description: The base model that is being fine-tuned. + object: enum: - - info - - warn - - error + - fine_tuning.job type: string - message: + description: 'The object type, which is always "fine_tuning.job".' + organization_id: type: string - object: + description: The organization that owns the fine-tuning job. + result_files: + type: array + items: + type: string + example: file-abc123 + description: 'The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents).' + status: enum: - - fine_tuning.job.event + - validating_files + - queued + - running + - succeeded + - failed + - cancelled type: string - description: Fine-tuning job event object + description: 'The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.' + trained_tokens: + type: integer + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. 
+ nullable: true + training_file: + type: string + description: 'The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents).' + validation_file: + type: string + description: 'The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents).' + nullable: true + integrations: + maxItems: 5 + type: array + items: + oneOf: + - $ref: '#/components/schemas/FineTuningIntegration' + x-oaiExpandable: true + description: A list of integrations to enable for this fine-tuning job. + nullable: true + seed: + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. + nullable: true + description: "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.\n" x-oaiMeta: - name: The fine-tuning job event object - example: "{\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ftevent-abc123\"\n \"created_at\": 1677610602,\n \"level\": \"info\",\n \"message\": \"Created fine-tuning job\"\n}\n" + name: The fine-tuning job object + example: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" FineTuningJobCheckpoint: title: FineTuningJobCheckpoint required: @@ -6299,6 +9177,35 @@ components: x-oaiMeta: name: The fine-tuning job checkpoint object example: "{\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P\",\n \"created_at\": 1712211699,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88\",\n \"fine_tuning_job_id\": \"ftjob-fpbNQ3H1GrMehXRf8cO97xTN\",\n \"metrics\": {\n \"step\": 88,\n \"train_loss\": 0.478,\n \"train_mean_token_accuracy\": 0.924,\n \"valid_loss\": 10.112,\n \"valid_mean_token_accuracy\": 0.145,\n \"full_valid_loss\": 0.567,\n \"full_valid_mean_token_accuracy\": 0.944\n },\n \"step_number\": 88\n}\n" + FineTuningJobEvent: + required: + - id + - object + - created_at + - level + - message + type: object + properties: + id: + type: string + created_at: + type: integer + level: + enum: + - info + - warn + - error + type: string + message: + type: string + object: + enum: + - fine_tuning.job.event + type: string + description: Fine-tuning job event object + x-oaiMeta: + name: The fine-tuning job event object + example: "{\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ftevent-abc123\"\n \"created_at\": 1677610602,\n \"level\": \"info\",\n \"message\": \"Created fine-tuning job\"\n}\n" FinetuneChatRequestInput: type: object properties: @@ -6335,1053 +9242,1075 @@ components: FinetuneCompletionRequestInput: type: object properties: - prompt: + prompt: + type: string + description: The input prompt for this training example. 
+ completion: + type: string + description: The desired completion for this training example. + description: The per-line training example of a fine-tuning input file for completions models + x-oaiMeta: + name: Training format for completions models + example: "{\n \"prompt\": \"What is the answer to 2+2\",\n \"completion\": \"4\"\n}\n" + FunctionObject: + required: + - name + type: object + properties: + description: + type: string + description: 'A description of what the function does, used by the model to choose when and how to call the function.' + name: + type: string + description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' + parameters: + $ref: '#/components/schemas/FunctionParameters' + strict: + type: boolean + description: 'Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling).' + default: false + nullable: true + FunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + Image: + type: object + properties: + b64_json: + type: string + description: 'The base64-encoded JSON of the generated image, if `response_format` is `b64_json`.' + url: + type: string + description: 'The URL of the generated image, if `response_format` is `url` (default).' + revised_prompt: + type: string + description: 'The prompt that was used to generate the image, if there was any revision to the prompt.' + description: Represents the url or the content of an image generated by the OpenAI API. + x-oaiMeta: + name: The image object + example: "{\n \"url\": \"...\",\n \"revised_prompt\": \"...\"\n}\n" + ImagesResponse: + required: + - created + - data + properties: + created: + type: integer + data: + type: array + items: + $ref: '#/components/schemas/Image' + Invite: + required: + - object + - id + - email + - role + - status + - invited_at + - expires_at + type: object + properties: + object: + enum: + - organization.invite + type: string + description: 'The object type, which is always `organization.invite`' + id: + type: string + description: 'The identifier, which can be referenced in API endpoints' + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + enum: + - owner + - reader + type: string + description: '`owner` or `reader`' + status: + enum: + - accepted + - expired + - pending + type: string + description: '`accepted`,`expired`, or `pending`' + invited_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. + description: Represents an individual `invite` to the organization. 
+ x-oaiMeta: + name: The invite object + example: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" + InviteDeleteResponse: + required: + - object + - id + - deleted + type: object + properties: + object: + enum: + - organization.invite.deleted + type: string + description: 'The object type, which is always `organization.invite.deleted`' + id: + type: string + deleted: + type: boolean + InviteListResponse: + required: + - object + - data + type: object + properties: + object: + enum: + - list + type: string + description: 'The object type, which is always `list`' + data: + type: array + items: + $ref: '#/components/schemas/Invite' + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there are additional results. + InviteRequest: + required: + - email + - role + type: object + properties: + email: + type: string + description: Send an email to this address + role: + enum: + - reader + - owner + type: string + description: '`owner` or `reader`' + ListAssistantsResponse: + required: + - object + - data + - first_id + - last_id + - has_more + type: object + properties: + object: type: string - description: The input prompt for this training example. - completion: + example: list + data: + type: array + items: + $ref: '#/components/schemas/AssistantObject' + first_id: type: string - description: The desired completion for this training example. 
- description: The per-line training example of a fine-tuning input file for completions models + example: asst_abc123 + last_id: + type: string + example: asst_abc456 + has_more: + type: boolean + example: false x-oaiMeta: - name: Training format for completions models - example: "{\n \"prompt\": \"What is the answer to 2+2\",\n \"completion\": \"4\"\n}\n" - CompletionUsage: + name: List assistants response object + group: chat + example: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" + ListAuditLogsResponse: required: - - prompt_tokens - - completion_tokens - - total_tokens + - object + - data + - first_id + - last_id + - has_more type: object properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - description: Usage statistics for the completion request. - RunCompletionUsage: + object: + enum: + - list + type: string + data: + type: array + items: + $ref: '#/components/schemas/AuditLog' + first_id: + type: string + example: audit_log-defb456h8dks + last_id: + type: string + example: audit_log-hnbkd8s93s + has_more: + type: boolean + ListBatchesResponse: required: - - prompt_tokens - - completion_tokens - - total_tokens + - object + - data + - has_more type: object properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - description: 'Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.).' 
- nullable: true - RunStepCompletionUsage: + data: + type: array + items: + $ref: '#/components/schemas/Batch' + first_id: + type: string + example: batch_abc123 + last_id: + type: string + example: batch_abc456 + has_more: + type: boolean + object: + enum: + - list + type: string + ListFilesResponse: required: - - prompt_tokens - - completion_tokens - - total_tokens + - object + - data + - first_id + - last_id + - has_more type: object properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. - nullable: true - AssistantsApiResponseFormatOption: - oneOf: - - enum: - - auto + object: type: string - description: "`auto` is the default value\n" - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" - x-oaiExpandable: true - AssistantObject: - title: Assistant + example: list + data: + type: array + items: + $ref: '#/components/schemas/OpenAIFile' + first_id: + type: string + example: file-abc123 + last_id: + type: string + example: file-abc456 + has_more: + type: boolean + example: false + ListFineTuningJobCheckpointsResponse: required: - - id - object - - created_at - - name - - description - - model - - instructions - - tools - - metadata + - data + - has_more type: object properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJobCheckpoint' object: enum: - - assistant - type: string - description: 'The object type, which is always `assistant`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the assistant was created. - name: - maxLength: 256 + - list type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 + first_id: type: string - description: "The description of the assistant. 
The maximum length is 512 characters.\n" nullable: true - model: - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - instructions: - maxLength: 256000 + last_id: type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - discriminator: - propertyName: type - mapping: - code_interpreter: '#/components/schemas/AssistantToolsCode' - file_search: '#/components/schemas/AssistantToolsFileSearch' - function: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" - default: 1 nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - description: Represents an `assistant` that can call the model and use tools. 
- x-oaiMeta: - name: The assistant object - beta: true - example: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - CreateAssistantRequest: + has_more: + type: boolean + ListFineTuningJobEventsResponse: + required: + - object + - data + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJobEvent' + object: + enum: + - list + type: string + ListMessagesResponse: required: - - model + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: '#/components/schemas/MessageObject' + first_id: + type: string + example: msg_abc123 + last_id: + type: string + example: msg_abc123 + has_more: + type: boolean + example: false + ListModelsResponse: + required: + - object + - data type: object properties: - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - example: gpt-4o - x-oaiTypeLabel: string - name: - maxLength: 256 + object: + enum: + - list type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 + data: + type: array + items: + $ref: '#/components/schemas/Model' + ListPaginatedFineTuningJobsResponse: + required: + - object + - data + - has_more + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/FineTuningJob' + has_more: + type: boolean + object: + enum: + - list type: string - description: "The description of the assistant. The maximum length is 512 characters.\n" - nullable: true - instructions: - maxLength: 256000 + ListRunStepsResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 + example: list + data: type: array items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - discriminator: - propertyName: type - mapping: - code_interpreter: '#/components/schemas/AssistantToolsCode' - file_search: '#/components/schemas/AssistantToolsFileSearch' - function: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. 
There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - oneOf: - - required: - - vector_store_ids - - required: - - vector_stores - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - vector_stores: - maxItems: 1 - type: array - items: - type: object - properties: - file_ids: - maxItems: 10000 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" - chunking_strategy: - type: object - oneOf: - - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - additionalProperties: false - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - discriminator: - propertyName: type - mapping: - auto: '#/components/schemas/CreateAssistantRequest/properties/tool_resources/properties/file_search/properties/vector_stores/items/properties/chunking_strategy/oneOf/0' - static: '#/components/schemas/CreateAssistantRequest/properties/tool_resources/properties/file_search/properties/vector_stores/items/properties/chunking_strategy/oneOf/1' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - x-oaiTypeLabel: map - description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ModifyAssistantRequest: + $ref: '#/components/schemas/RunStepObject' + first_id: + type: string + example: step_abc123 + last_id: + type: string + example: step_abc456 + has_more: + type: boolean + example: false + ListRunsResponse: + required: + - object + - data + - first_id + - last_id + - has_more type: object properties: - model: - anyOf: - - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - name: - maxLength: 256 + object: type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 + example: list + data: + type: array + items: + $ref: '#/components/schemas/RunObject' + first_id: type: string - description: "The description of the assistant. The maximum length is 512 characters.\n" - nullable: true - instructions: - maxLength: 256000 + example: run_abc123 + last_id: type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 + example: run_abc456 + has_more: + type: boolean + example: false + ListThreadsResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + example: list + data: type: array items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - discriminator: - propertyName: type - mapping: - code_interpreter: '#/components/schemas/AssistantToolsCode' - file_search: '#/components/schemas/AssistantToolsFileSearch' - function: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: + $ref: '#/components/schemas/ThreadObject' + first_id: + type: string + example: asst_abc123 + last_id: + type: string + example: asst_abc456 + has_more: + type: boolean + example: false + ListVectorStoreFilesResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: '#/components/schemas/VectorStoreFileObject' + first_id: + type: string + example: file-abc123 + last_id: + type: string + example: file-abc456 + has_more: + type: boolean + example: false + ListVectorStoresResponse: + required: + - object + - data + - first_id + - last_id + - has_more + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: '#/components/schemas/VectorStoreObject' + first_id: + type: string + example: vs_abc123 + last_id: + type: string + example: vs_abc456 + has_more: + type: boolean + example: false + MessageContentImageFileObject: + title: Image file + required: + - type + - image_file + type: object + properties: + type: + enum: + - image_file + type: string + description: Always `image_file`. + image_file: + required: + - file_id type: object properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: + file_id: + type: string + description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' + default: auto + description: 'References an image [File](/docs/api-reference/files) in the content of a message.' + MessageContentImageUrlObject: + title: Image URL + required: + - type + - image_url + type: object + properties: + type: + enum: + - image_url + type: string + description: The type of the content part. + image_url: + required: + - url type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 + properties: + url: + type: string + description: 'The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' 
+ format: uri + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto`' + default: auto + description: References an image URL in the content of a message. + MessageContentRefusalObject: + title: Refusal + required: + - type + - refusal + type: object + properties: + type: + enum: + - refusal + type: string + description: Always `refusal`. + refusal: + type: string + description: The refusal content generated by the assistant. + MessageContentTextAnnotationsFileCitationObject: + title: File citation + required: + - type + - text + - file_citation + - start_index + - end_index + type: object + properties: + type: + enum: + - file_citation + type: string + description: Always `file_citation`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_citation: + required: + - file_id + type: object + properties: + file_id: + type: string + description: The ID of the specific File the citation is from. + start_index: minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 + type: integer + end_index: minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - DeleteAssistantResponse: + type: integer + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + MessageContentTextAnnotationsFilePathObject: + title: File path required: - - id - - object - - deleted + - type + - text + - file_path + - start_index + - end_index type: object properties: - id: - type: string - deleted: - type: boolean - object: + type: enum: - - assistant.deleted + - file_path type: string - ListAssistantsResponse: + description: Always `file_path`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_path: + required: + - file_id + type: object + properties: + file_id: + type: string + description: The ID of the file that was generated. + start_index: + minimum: 0 + type: integer + end_index: + minimum: 0 + type: integer + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. 
+ MessageContentTextObject: + title: Text required: - - object - - data - - first_id - - last_id - - has_more + - type + - text type: object properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/AssistantObject' - first_id: - type: string - example: asst_abc123 - last_id: + type: + enum: + - text type: string - example: asst_abc456 - has_more: - type: boolean - example: false - x-oaiMeta: - name: List assistants response object - group: chat - example: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" - AssistantToolsCode: - title: Code interpreter tool + description: Always `text`. + text: + required: + - value + - annotations + type: object + properties: + value: + type: string + description: The data that makes up the text. + annotations: + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' + - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' + discriminator: + propertyName: type + mapping: + file_citation: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' + file_path: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' + x-oaiExpandable: true + description: The text content that is part of a message. + MessageDeltaContentImageFileObject: + title: Image file required: + - index - type type: object properties: + index: + type: integer + description: The index of the content part in the message. type: enum: - - code_interpreter + - image_file type: string - description: 'The type of tool being defined: `code_interpreter`' - AssistantToolsFileSearch: - title: FileSearch tool + description: Always `image_file`. + image_file: + type: object + properties: + file_id: + type: string + description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' + default: auto + description: 'References an image [File](/docs/api-reference/files) in the content of a message.' 
+ MessageDeltaContentImageUrlObject: + title: Image URL required: + - index - type type: object properties: + index: + type: integer + description: The index of the content part in the message. type: enum: - - file_search + - image_url type: string - description: 'The type of tool being defined: `file_search`' - file_search: + description: Always `image_url`. + image_url: type: object properties: - max_num_results: - maximum: 50 - minimum: 1 - type: integer - description: "The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.\n\nNote that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information.\n" - description: Overrides for the file search tool. - AssistantToolsFileSearchTypeOnly: - title: FileSearch tool + url: + type: string + description: 'The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' + detail: + enum: + - auto + - low + - high + type: string + description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`.' + default: auto + description: References an image URL in the content of a message. + MessageDeltaContentRefusalObject: + title: Refusal required: + - index - type type: object properties: + index: + type: integer + description: The index of the refusal part in the message. type: enum: - - file_search + - refusal type: string - description: 'The type of tool being defined: `file_search`' - AssistantToolsFunction: - title: Function tool + description: Always `refusal`. + refusal: + type: string + description: The refusal content that is part of a message. + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation required: + - index - type - - function type: object properties: + index: + type: integer + description: The index of the annotation in the text content part. type: enum: - - function + - file_citation type: string - description: 'The type of tool being defined: `function`' - function: - $ref: '#/components/schemas/FunctionObject' - TruncationObject: - title: Thread Truncation Controls + description: Always `file_citation`. + text: + type: string + description: The text in the message content that needs to be replaced. + file_citation: + type: object + properties: + file_id: + type: string + description: The ID of the specific File the citation is from. + quote: + type: string + description: The specific quote in the file. + start_index: + minimum: 0 + type: integer + end_index: + minimum: 0 + type: integer + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path required: + - index - type type: object properties: + index: + type: integer + description: The index of the annotation in the text content part. type: enum: - - auto - - last_messages + - file_path type: string - description: 'The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. 
When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.' - last_messages: - minimum: 1 - type: integer - description: The number of most recent messages from the thread when constructing the context for the run. - nullable: true - description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. - AssistantsApiToolChoiceOption: - oneOf: - - enum: - - none - - auto - - required + description: Always `file_path`. + text: type: string - description: "`none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user.\n" - - $ref: '#/components/schemas/AssistantsNamedToolChoice' - description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tools and instead generates a message.\n`auto` is the default value and means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools before responding to the user.\nSpecifying a particular tool like `{\"type\": \"file_search\"}` or `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n" - x-oaiExpandable: true - AssistantsNamedToolChoice: + description: The text in the message content that needs to be replaced. + file_path: + type: object + properties: + file_id: + type: string + description: The ID of the file that was generated. + start_index: + minimum: 0 + type: integer + end_index: + minimum: 0 + type: integer + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + MessageDeltaContentTextObject: + title: Text required: + - index - type type: object properties: + index: + type: integer + description: The index of the content part in the message. type: enum: - - function - - code_interpreter - - file_search + - text type: string - description: 'The type of the tool. If type is `function`, the function name must be set' - function: - required: - - name + description: Always `text`. + text: type: object properties: - name: + value: type: string - description: The name of the function to call. - description: Specifies a tool the model should use. Use to force the model to call a specific tool. - RunObject: - title: A run on a thread + description: The data that makes up the text. + annotations: + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject' + - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject' + discriminator: + propertyName: type + mapping: + file_citation: '#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject' + file_path: '#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject' + x-oaiExpandable: true + description: The text content that is part of a message. 
+ MessageDeltaObject: + title: Message delta object required: - id - object - - created_at - - thread_id - - assistant_id - - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at - - model - - instructions - - tools - - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - truncation_strategy - - tool_choice - - parallel_tool_calls - - response_format + - delta type: object properties: id: type: string - description: 'The identifier, which can be referenced in API endpoints.' + description: 'The identifier of the message, which can be referenced in API endpoints.' object: enum: - - thread.run - type: string - description: 'The object type, which is always `thread.run`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was created. - thread_id: - type: string - description: 'The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run.' - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run.' - status: - enum: - - queued - - in_progress - - requires_action - - cancelling - - cancelled - - failed - - completed - - incomplete - - expired + - thread.message.delta type: string - description: 'The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`.' - required_action: - required: - - type - - submit_tool_outputs - type: object - properties: - type: - enum: - - submit_tool_outputs - type: string - description: 'For now, this is always `submit_tool_outputs`.' - submit_tool_outputs: - required: - - tool_calls - type: object - properties: - tool_calls: - type: array - items: - $ref: '#/components/schemas/RunToolCallObject' - description: A list of the relevant tool calls. - description: Details on the tool outputs needed for this run to continue. - description: Details on the action required to continue the run. Will be `null` if no action is required. - nullable: true - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - rate_limit_exceeded - - invalid_prompt - type: string - description: 'One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.' - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the run will expire. - nullable: true - started_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was started. - nullable: true - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was cancelled. - nullable: true - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run failed. - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was completed. - nullable: true - incomplete_details: + description: 'The object type, which is always `thread.message.delta`.' + delta: type: object properties: - reason: + role: enum: - - max_completion_tokens - - max_prompt_tokens + - user + - assistant type: string - description: The reason why the run is incomplete. 
This will point to which specific token limit was reached over the course of the run. - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. - nullable: true - model: - type: string - description: 'The model that the [assistant](/docs/api-reference/assistants) used for this run.' - instructions: - type: string - description: 'The instructions that the [assistant](/docs/api-reference/assistants) used for this run.' - tools: - maxItems: 20 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - discriminator: - propertyName: type - mapping: - code_interpreter: '#/components/schemas/AssistantToolsCode' - file_search: '#/components/schemas/AssistantToolsFileSearch' - function: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: 'The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.' - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - usage: - $ref: '#/components/schemas/RunCompletionUsage' - temperature: - type: number - description: 'The sampling temperature used for this run. If not set, defaults to 1.' - nullable: true - top_p: - type: number - description: 'The nucleus sampling value used for this run. If not set, defaults to 1.' - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens specified to have been used over the course of the run.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens specified to have been used over the course of the run.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - description: 'Represents an execution run on a [thread](/docs/api-reference/threads).' + description: The entity that produced the message. One of `user` or `assistant`. + content: + type: array + items: + oneOf: + - $ref: '#/components/schemas/MessageDeltaContentImageFileObject' + - $ref: '#/components/schemas/MessageDeltaContentTextObject' + - $ref: '#/components/schemas/MessageDeltaContentRefusalObject' + - $ref: '#/components/schemas/MessageDeltaContentImageUrlObject' + discriminator: + propertyName: type + mapping: + image_file: '#/components/schemas/MessageDeltaContentImageFileObject' + text: '#/components/schemas/MessageDeltaContentTextObject' + refusal: '#/components/schemas/MessageDeltaContentRefusalObject' + image_url: '#/components/schemas/MessageDeltaContentImageUrlObject' + x-oaiExpandable: true + description: The content of the message in array of text and/or images. + description: The delta containing the fields that have changed on the Message. + description: "Represents a message delta i.e. 
any changed fields on a message during streaming.\n" x-oaiMeta: - name: The run object + name: The message delta object beta: true - example: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1698107661,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699073476,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699073498,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [{\"type\": \"file_search\"}, {\"type\": \"code_interpreter\"}],\n \"metadata\": {},\n \"incomplete_details\": null,\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - CreateRunRequest: + example: "{\n \"id\": \"msg_123\",\n \"object\": \"thread.message.delta\",\n \"delta\": {\n \"content\": [\n {\n \"index\": 0,\n \"type\": \"text\",\n \"text\": { \"value\": \"Hello\", \"annotations\": [] }\n }\n ]\n }\n}\n" + MessageObject: + title: The message object required: + - id + - object + - created_at - thread_id + - status + - incomplete_details + - completed_at + - incomplete_at + - role + - content - assistant_id + - run_id + - attachments + - metadata type: object properties: - assistant_id: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' + object: + enum: + - thread.message + type: string + description: 'The object type, which is always `thread.message`.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the message was created. + thread_id: type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' - nullable: true - example: gpt-4o - x-oaiTypeLabel: string - instructions: + description: 'The [thread](/docs/api-reference/threads) ID that this message belongs to.' + status: + enum: + - in_progress + - incomplete + - completed type: string - description: 'Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis.' + description: 'The status of the message, which can be either `in_progress`, `incomplete`, or `completed`.' 
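+        # A message ends up in the `incomplete` status when generation stops
+        # early; the `incomplete_details` object below records the cause, for
+        # example { "reason": "max_tokens" } when a token limit cut it off.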
+ incomplete_details: + required: + - reason + type: object + properties: + reason: + enum: + - content_filter + - max_tokens + - run_cancelled + - run_expired + - run_failed + type: string + description: The reason the message is incomplete. + description: 'On an incomplete message, details about why the message is incomplete.' nullable: true - additional_instructions: - type: string - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the message was completed. nullable: true - additional_messages: - type: array - items: - $ref: '#/components/schemas/CreateMessageRequest' - description: Adds additional messages to the thread before creating the run. + incomplete_at: + type: integer + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. nullable: true - tools: - maxItems: 20 + role: + enum: + - user + - assistant + type: string + description: The entity that produced the message. One of `user` or `assistant`. + content: type: array items: oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' + - $ref: '#/components/schemas/MessageContentImageFileObject' + - $ref: '#/components/schemas/MessageContentImageUrlObject' + - $ref: '#/components/schemas/MessageContentTextObject' + - $ref: '#/components/schemas/MessageContentRefusalObject' discriminator: propertyName: type mapping: - code_interpreter: '#/components/schemas/AssistantToolsCode' - file_search: '#/components/schemas/AssistantToolsFileSearch' - function: '#/components/schemas/AssistantToolsFunction' + image_file: '#/components/schemas/MessageContentImageFileObject' + image_url: '#/components/schemas/MessageContentImageUrlObject' + text: '#/components/schemas/MessageContentTextObject' + refusal: '#/components/schemas/MessageContentRefusalObject' x-oaiExpandable: true - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. 
See `incomplete_details` for more info.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ListRunsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/RunObject' - first_id: + description: The content of the message in array of text and/or images. + assistant_id: type: string - example: run_abc123 - last_id: + description: 'If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message.' + nullable: true + run_id: type: string - example: run_abc456 - has_more: - type: boolean - example: false - ModifyRunRequest: - type: object - properties: - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" + description: 'The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints.' nullable: true - x-oaiTypeLabel: map - additionalProperties: false - SubmitToolOutputsRunRequest: - required: - - tool_outputs - type: object - properties: - tool_outputs: + attachments: type: array items: type: object properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: + file_id: type: string - description: The output of the tool call to be submitted to continue the run. - description: A list of tools for which the outputs are being submitted. - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" + description: The ID of the file to attach to the message. + tools: + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/AssistantToolsCode' + file_search: '#/components/schemas/AssistantToolsFileSearchTypeOnly' + x-oaiExpandable: true + description: The tools to add this file to. + description: 'A list of files attached to the message, and the tools they were added to.' nullable: true - additionalProperties: false - RunToolCallObject: + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + description: 'Represents a message within a [thread](/docs/api-reference/threads).' + x-oaiMeta: + name: The message object + beta: true + example: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1698983503,\n \"thread_id\": \"thread_abc123\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hi! How can I help you today?\",\n \"annotations\": []\n }\n }\n ],\n \"assistant_id\": \"asst_abc123\",\n \"run_id\": \"run_abc123\",\n \"attachments\": [],\n \"metadata\": {}\n}\n" + MessageRequestContentTextObject: + title: Text required: - - id - type - - function + - text type: object properties: - id: - type: string - description: 'The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.' type: enum: - - function + - text type: string - description: 'The type of tool call the output is required for. For now, this is always `function`.' - function: - required: - - name - - arguments - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments that the model expects you to pass to the function. - description: The function definition. - description: Tool call objects - CreateThreadAndRunRequest: + description: Always `text`. + text: + type: string + description: Text content to be sent to the model + description: The text content that is part of a message. + Model: + title: Model required: - - thread_id - - assistant_id - type: object + - id + - object + - created + - owned_by properties: - assistant_id: + id: type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' - thread: - $ref: '#/components/schemas/CreateThreadRequest' + description: 'The model identifier, which can be referenced in the API endpoints.' + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. + object: + enum: + - model + type: string + description: 'The object type, which is always "model".' + owned_by: + type: string + description: The organization that owns the model. + description: Describes an OpenAI model offering that can be used with the API. + x-oaiMeta: + name: The model object + example: "{\n \"id\": \"VAR_chat_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" + ModifyAssistantRequest: + type: object + properties: model: anyOf: - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. 
If not, the model associated with the assistant will be used.' + description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.\n" + name: + maxLength: 256 + type: string + description: "The name of the assistant. The maximum length is 256 characters.\n" + nullable: true + description: + maxLength: 512 + type: string + description: "The description of the assistant. The maximum length is 512 characters.\n" nullable: true - example: gpt-4o - x-oaiTypeLabel: string instructions: + maxLength: 256000 type: string - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" nullable: true tools: - maxItems: 20 + maxItems: 128 type: array items: oneOf: @@ -7394,8 +10323,8 @@ components: code_interpreter: '#/components/schemas/AssistantToolsCode' file_search: '#/components/schemas/AssistantToolsFileSearch' function: '#/components/schemas/AssistantToolsFunction' - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true + x-oaiExpandable: true + description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" tool_resources: type: object properties: @@ -7407,7 +10336,7 @@ components: type: array items: type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + description: "Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" file_search: type: object properties: @@ -7416,19 +10345,19 @@ components: type: array items: type: string - description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" + description: "Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" nullable: true metadata: type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true x-oaiTypeLabel: map temperature: maximum: 2 minimum: 0 type: number - description: empty + description: "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" default: 1 nullable: true example: 1 @@ -7436,193 +10365,28 @@ components: maximum: 1 minimum: 0 type: number - description: empty + description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" default: 1 nullable: true example: 1 - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ThreadObject: - title: Thread - required: - - id - - object - - created_at - - tool_resources - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - thread - type: string - description: 'The object type, which is always `thread`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the thread was created. - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: 'Represents a thread that contains [messages](/docs/api-reference/messages).' - x-oaiMeta: - name: The thread object - beta: true - example: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1698107661,\n \"metadata\": {}\n}\n" - CreateThreadRequest: - type: object - properties: - messages: - type: array - items: - $ref: '#/components/schemas/CreateMessageRequest' - description: 'A list of [messages](/docs/api-reference/messages) to start the thread with.' - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - oneOf: - - required: - - vector_store_ids - - required: - - vector_stores - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - vector_stores: - maxItems: 1 - type: array - items: - type: object - properties: - file_ids: - maxItems: 10000 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" - chunking_strategy: - type: object - oneOf: - - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - additionalProperties: false - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - discriminator: - propertyName: type - mapping: - auto: '#/components/schemas/CreateThreadRequest/properties/tool_resources/properties/file_search/properties/vector_stores/items/properties/chunking_strategy/oneOf/0' - static: '#/components/schemas/CreateThreadRequest/properties/tool_resources/properties/file_search/properties/vector_stores/items/properties/chunking_strategy/oneOf/1' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to a vector store. 
This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - x-oaiTypeLabel: map - x-oaiExpandable: true - description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + additionalProperties: false + ModifyMessageRequest: + type: object + properties: + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true + x-oaiTypeLabel: map + additionalProperties: false + ModifyRunRequest: + type: object + properties: metadata: type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true x-oaiTypeLabel: map additionalProperties: false @@ -7654,3072 +10418,3496 @@ components: nullable: true metadata: type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" nullable: true x-oaiTypeLabel: map additionalProperties: false - DeleteThreadResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - thread.deleted - type: string - ListThreadsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/ThreadObject' - first_id: - type: string - example: asst_abc123 - last_id: - type: string - example: asst_abc456 - has_more: - type: boolean - example: false - MessageObject: - title: The message object + OpenAIFile: + title: OpenAIFile required: - id - object + - bytes - created_at - - thread_id + - filename + - purpose - status - - incomplete_details - - completed_at - - incomplete_at - - role - - content - - assistant_id - - run_id - - attachments - - metadata - type: object properties: id: type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - thread.message - type: string - description: 'The object type, which is always `thread.message`.' + description: 'The file identifier, which can be referenced in the API endpoints.' + bytes: + type: integer + description: 'The size of the file, in bytes.' created_at: type: integer - description: The Unix timestamp (in seconds) for when the message was created. - thread_id: + description: The Unix timestamp (in seconds) for when the file was created. + filename: type: string - description: 'The [thread](/docs/api-reference/threads) ID that this message belongs to.' - status: + description: The name of the file. + object: enum: - - in_progress - - incomplete - - completed + - file type: string - description: 'The status of the message, which can be either `in_progress`, `incomplete`, or `completed`.' - incomplete_details: - required: - - reason - type: object - properties: - reason: - enum: - - content_filter - - max_tokens - - run_cancelled - - run_expired - - run_failed - type: string - description: The reason the message is incomplete. - description: 'On an incomplete message, details about why the message is incomplete.' - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was completed. - nullable: true - incomplete_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. - nullable: true - role: + description: 'The object type, which is always `file`.' + purpose: enum: - - user - - assistant + - assistants + - assistants_output + - batch + - batch_output + - fine-tune + - fine-tune-results + - vision type: string - description: The entity that produced the message. One of `user` or `assistant`. 
- content: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentImageUrlObject' - - $ref: '#/components/schemas/MessageContentTextObject' - - $ref: '#/components/schemas/MessageContentRefusalObject' - discriminator: - propertyName: type - mapping: - image_file: '#/components/schemas/MessageContentImageFileObject' - image_url: '#/components/schemas/MessageContentImageUrlObject' - text: '#/components/schemas/MessageContentTextObject' - refusal: '#/components/schemas/MessageContentRefusalObject' - x-oaiExpandable: true - description: The content of the message in array of text and/or images. - assistant_id: + description: 'The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`.' + status: + enum: + - uploaded + - processed + - error type: string - description: 'If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message.' - nullable: true - run_id: + description: 'Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`.' + deprecated: true + status_details: type: string - description: 'The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints.' - nullable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - discriminator: - propertyName: type - mapping: - code_interpreter: '#/components/schemas/AssistantToolsCode' - file_search: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - x-oaiExpandable: true - description: The tools to add this file to. - description: 'A list of files attached to the message, and the tools they were added to.' - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: 'Represents a message within a [thread](/docs/api-reference/threads).' + description: 'Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.' + deprecated: true + description: The `File` object represents a document that has been uploaded to OpenAI. x-oaiMeta: - name: The message object - beta: true - example: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1698983503,\n \"thread_id\": \"thread_abc123\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hi! 
How can I help you today?\",\n \"annotations\": []\n }\n }\n ],\n \"assistant_id\": \"asst_abc123\",\n \"run_id\": \"run_abc123\",\n \"attachments\": [],\n \"metadata\": {}\n}\n" - MessageDeltaObject: - title: Message delta object + name: The file object + example: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n}\n" + OtherChunkingStrategyResponseParam: + title: Other Chunking Strategy required: - - id - - object - - delta + - type type: object properties: - id: - type: string - description: 'The identifier of the message, which can be referenced in API endpoints.' - object: + type: enum: - - thread.message.delta + - other type: string - description: 'The object type, which is always `thread.message.delta`.' - delta: - type: object - properties: - role: - enum: - - user - - assistant - type: string - description: The entity that produced the message. One of `user` or `assistant`. - content: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageDeltaContentImageFileObject' - - $ref: '#/components/schemas/MessageDeltaContentTextObject' - - $ref: '#/components/schemas/MessageDeltaContentRefusalObject' - - $ref: '#/components/schemas/MessageDeltaContentImageUrlObject' - discriminator: - propertyName: type - mapping: - image_file: '#/components/schemas/MessageDeltaContentImageFileObject' - text: '#/components/schemas/MessageDeltaContentTextObject' - refusal: '#/components/schemas/MessageDeltaContentRefusalObject' - image_url: '#/components/schemas/MessageDeltaContentImageUrlObject' - x-oaiExpandable: true - description: The content of the message in array of text and/or images. - description: The delta containing the fields that have changed on the Message. - description: "Represents a message delta i.e. any changed fields on a message during streaming.\n" - x-oaiMeta: - name: The message delta object - beta: true - example: "{\n \"id\": \"msg_123\",\n \"object\": \"thread.message.delta\",\n \"delta\": {\n \"content\": [\n {\n \"index\": 0,\n \"type\": \"text\",\n \"text\": { \"value\": \"Hello\", \"annotations\": [] }\n }\n ]\n }\n}\n" - CreateMessageRequest: + description: Always `other`. + additionalProperties: false + description: 'This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.' + ParallelToolCalls: + type: boolean + description: 'Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.' + default: true + PredictionContent: + title: Static Content required: - - role + - type - content type: object properties: - role: + type: enum: - - user - - assistant + - content type: string - description: "The role of the entity that is creating the message. Allowed values include:\n- `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n- `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.\n" + description: "The type of the predicted content you want to provide. This type is\ncurrently always `content`.\n" content: oneOf: - title: Text content type: string - description: The text contents of the message. + description: "The content used for a Predicted Output. 
This is often the\ntext of a file you are regenerating with minor changes.\n" - title: Array of content parts minItems: 1 type: array items: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentImageUrlObject' - - $ref: '#/components/schemas/MessageRequestContentTextObject' - discriminator: - propertyName: type - mapping: - image_file: '#/components/schemas/MessageContentImageFileObject' - image_url: '#/components/schemas/MessageContentImageUrlObject' - text: '#/components/schemas/MessageRequestContentTextObject' - x-oaiExpandable: true - description: 'An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview).' + $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' + description: 'An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs.' + description: "The content that should be matched when generating a model response.\nIf generated tokens would match this content, the entire model response\ncan be returned much more quickly.\n" x-oaiExpandable: true - attachments: - required: - - file_id - - tools - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - discriminator: - propertyName: type - mapping: - code_interpreter: '#/components/schemas/AssistantToolsCode' - file_search: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - x-oaiExpandable: true - description: The tools to add this file to. - description: 'A list of files attached to the message, and the tools they should be added to.' - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ModifyMessageRequest: - type: object - properties: - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - DeleteMessageResponse: + description: "Static predicted output content, such as the content of a text file that is\nbeing regenerated.\n" + Project: required: - id - object - - deleted + - name + - created_at + - status type: object properties: id: type: string - deleted: - type: boolean + description: 'The identifier, which can be referenced in API endpoints' object: enum: - - thread.message.deleted - type: string - ListMessagesResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: + - organization.project type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/MessageObject' - first_id: + description: 'The object type, which is always `organization.project`' + name: type: string - example: msg_abc123 - last_id: + description: The name of the project. This appears in reporting. + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was created. + archived_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was archived or `null`. + nullable: true + status: + enum: + - active + - archived type: string - example: msg_abc123 - has_more: - type: boolean - example: false - MessageContentImageFileObject: - title: Image file + description: '`active` or `archived`' + description: Represents an individual project. + x-oaiMeta: + name: The project object + example: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" + ProjectApiKey: required: - - type - - image_file + - object + - redacted_value + - name + - created_at + - id + - owner type: object properties: - type: + object: enum: - - image_file + - organization.project.api_key type: string - description: Always `image_file`. - image_file: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: 'References an image [File](/docs/api-reference/files) in the content of a message.' - MessageDeltaContentImageFileObject: - title: Image file - required: - - index - - type - type: object - properties: - index: + description: 'The object type, which is always `organization.project.api_key`' + redacted_value: + type: string + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: type: integer - description: The index of the content part in the message. - type: - enum: - - image_file + description: The Unix timestamp (in seconds) of when the API key was created + id: type: string - description: Always `image_file`. - image_file: + description: 'The identifier, which can be referenced in API endpoints' + owner: type: object properties: - file_id: - type: string - description: 'The [File](/docs/api-reference/files) ID of the image in the message content. 
Set `purpose="vision"` when uploading the File if you need to later display the file content.' - detail: + type: enum: - - auto - - low - - high + - user + - service_account type: string - description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: 'References an image [File](/docs/api-reference/files) in the content of a message.' - MessageContentImageUrlObject: - title: Image URL + description: '`user` or `service_account`' + user: + $ref: '#/components/schemas/ProjectUser' + service_account: + $ref: '#/components/schemas/ProjectServiceAccount' + description: Represents an individual API key in a project. + x-oaiMeta: + name: The project API key object + example: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n }\n}\n" + ProjectApiKeyDeleteResponse: required: - - type - - image_url + - object + - id + - deleted type: object properties: - type: + object: enum: - - image_url + - organization.project.api_key.deleted type: string - description: The type of the content part. - image_url: - required: - - url - type: object - properties: - url: - type: string - description: 'The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' - format: uri - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto`' - default: auto - description: References an image URL in the content of a message. - MessageDeltaContentImageUrlObject: - title: Image URL + id: + type: string + deleted: + type: boolean + ProjectApiKeyListResponse: required: - - index - - type + - object + - data + - first_id + - last_id + - has_more type: object properties: - index: - type: integer - description: The index of the content part in the message. - type: + object: enum: - - image_url - type: string - description: Always `image_url`. - image_url: - type: object - properties: - url: - type: string - description: 'The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: References an image URL in the content of a message. - MessageContentTextObject: - title: Text + - list + type: string + data: + type: array + items: + $ref: '#/components/schemas/ProjectApiKey' + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + ProjectCreateRequest: required: - - type - - text + - name type: object properties: - type: - enum: - - text + name: type: string - description: Always `text`. - text: - required: - - value - - annotations - type: object - properties: - value: - type: string - description: The data that makes up the text. 
- annotations: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' - discriminator: - propertyName: type - mapping: - file_citation: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' - file_path: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - description: The text content that is part of a message. - MessageContentRefusalObject: - title: Refusal + description: 'The friendly name of the project, this name appears in reports.' + ProjectListResponse: required: - - type - - refusal + - object + - data + - first_id + - last_id + - has_more type: object properties: - type: + object: enum: - - refusal + - list type: string - description: Always `refusal`. - refusal: + data: + type: array + items: + $ref: '#/components/schemas/Project' + first_id: type: string - description: The refusal content generated by the assistant. - MessageRequestContentTextObject: - title: Text + last_id: + type: string + has_more: + type: boolean + ProjectRateLimit: required: - - type - - text + - object + - id + - model + - max_requests_per_1_minute + - max_tokens_per_1_minute type: object properties: - type: + object: enum: - - text + - project.rate_limit type: string - description: Always `text`. - text: + description: 'The object type, which is always `project.rate_limit`' + id: type: string - description: Text content to be sent to the model - description: The text content that is part of a message. - MessageContentTextAnnotationsFileCitationObject: - title: File citation + description: 'The identifier, which can be referenced in API endpoints.' + model: + type: string + description: The model this rate limit applies to. + max_requests_per_1_minute: + type: integer + description: The maximum requests per minute. + max_tokens_per_1_minute: + type: integer + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only present for relevant models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only present for relevant models. + max_requests_per_1_day: + type: integer + description: The maximum requests per day. Only present for relevant models. + batch_1_day_max_input_tokens: + type: integer + description: The maximum batch input tokens per day. Only present for relevant models. + description: Represents a project rate limit config. + x-oaiMeta: + name: The project rate limit object + example: "{\n \"object\": \"project.rate_limit\",\n \"id\": \"rl_ada\",\n \"model\": \"ada\",\n \"max_requests_per_1_minute\": 600,\n \"max_tokens_per_1_minute\": 150000,\n \"max_images_per_1_minute\": 10\n}\n" + ProjectRateLimitListResponse: required: - - type - - text - - file_citation - - start_index - - end_index + - object + - data + - first_id + - last_id + - has_more type: object properties: - type: + object: enum: - - file_citation + - list type: string - description: Always `file_citation`. - text: + data: + type: array + items: + $ref: '#/components/schemas/ProjectRateLimit' + first_id: type: string - description: The text in the message content that needs to be replaced. - file_citation: - required: - - file_id - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. 
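+        # `first_id` and `last_id` mark the boundaries of this page of results;
+        # clients typically pass `last_id` as the `after` cursor on the list
+        # endpoint to fetch the next page while `has_more` is true.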
- start_index: - minimum: 0 + last_id: + type: string + has_more: + type: boolean + ProjectRateLimitUpdateRequest: + type: object + properties: + max_requests_per_1_minute: type: integer - end_index: - minimum: 0 + description: The maximum requests per minute. + max_tokens_per_1_minute: type: integer - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - MessageContentTextAnnotationsFilePathObject: - title: File path + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only relevant for certain models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only relevant for certain models. + max_requests_per_1_day: + type: integer + description: The maximum requests per day. Only relevant for certain models. + batch_1_day_max_input_tokens: + type: integer + description: The maximum batch input tokens per day. Only relevant for certain models. + ProjectServiceAccount: required: - - type - - text - - file_path - - start_index - - end_index + - object + - id + - name + - role + - created_at type: object properties: - type: + object: enum: - - file_path + - organization.project.service_account type: string - description: Always `file_path`. - text: + description: 'The object type, which is always `organization.project.service_account`' + id: type: string - description: The text in the message content that needs to be replaced. - file_path: - required: - - file_id - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 + description: 'The identifier, which can be referenced in API endpoints' + name: + type: string + description: The name of the service account + role: + enum: + - owner + - member + type: string + description: '`owner` or `member`' + created_at: type: integer - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - MessageDeltaContentTextObject: - title: Text + description: The Unix timestamp (in seconds) of when the service account was created + description: Represents an individual service account in a project. + x-oaiMeta: + name: The project service account object + example: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" + ProjectServiceAccountApiKey: required: - - index - - type + - object + - value + - name + - created_at + - id type: object properties: - index: - type: integer - description: The index of the content part in the message. - type: + object: enum: - - text + - organization.project.service_account.api_key type: string - description: Always `text`. - text: - type: object - properties: - value: - type: string - description: The data that makes up the text. 
- annotations: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject' - discriminator: - propertyName: type - mapping: - file_citation: '#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject' - file_path: '#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - description: The text content that is part of a message. - MessageDeltaContentRefusalObject: - title: Refusal + description: 'The object type, which is always `organization.project.service_account.api_key`' + value: + type: string + name: + type: string + created_at: + type: integer + id: + type: string + ProjectServiceAccountCreateRequest: required: - - index - - type + - name + type: object + properties: + name: + type: string + description: The name of the service account being created. + ProjectServiceAccountCreateResponse: + required: + - object + - id + - name + - role + - created_at + - api_key type: object properties: - index: - type: integer - description: The index of the refusal part in the message. - type: + object: enum: - - refusal + - organization.project.service_account type: string - description: Always `refusal`. - refusal: + id: type: string - description: The refusal content that is part of a message. - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation + name: + type: string + role: + enum: + - member + type: string + description: Service accounts can only have one role of type `member` + created_at: + type: integer + api_key: + $ref: '#/components/schemas/ProjectServiceAccountApiKey' + ProjectServiceAccountDeleteResponse: required: - - index - - type + - object + - id + - deleted type: object properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: + object: enum: - - file_citation + - organization.project.service_account.deleted type: string - description: Always `file_citation`. - text: + id: type: string - description: The text in the message content that needs to be replaced. - file_citation: - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. - quote: - type: string - description: The specific quote in the file. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path + deleted: + type: boolean + ProjectServiceAccountListResponse: required: - - index - - type + - object + - data + - first_id + - last_id + - has_more type: object properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: + object: enum: - - file_path + - list type: string - description: Always `file_path`. - text: + data: + type: array + items: + $ref: '#/components/schemas/ProjectServiceAccount' + first_id: type: string - description: The text in the message content that needs to be replaced. - file_path: - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. 
- start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - RunStepObject: - title: Run steps + last_id: + type: string + has_more: + type: boolean + ProjectUpdateRequest: required: - - id - - object - - created_at - - assistant_id - - thread_id - - run_id - - type - - status - - step_details - - last_error - - expired_at - - cancelled_at - - failed_at - - completed_at - - metadata - - usage + - name type: object properties: - id: + name: type: string - description: 'The identifier of the run step, which can be referenced in API endpoints.' + description: 'The updated name of the project, this name appears in reports.' + ProjectUser: + required: + - object + - id + - name + - email + - role + - added_at + type: object + properties: object: enum: - - thread.run.step - type: string - description: 'The object type, which is always `thread.run.step`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step was created. - assistant_id: + - organization.project.user type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) associated with the run step.' - thread_id: + description: 'The object type, which is always `organization.project.user`' + id: type: string - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' - run_id: + description: 'The identifier, which can be referenced in API endpoints' + name: type: string - description: 'The ID of the [run](/docs/api-reference/runs) that this run step is a part of.' - type: - enum: - - message_creation - - tool_calls + description: The name of the user + email: type: string - description: 'The type of run step, which can be either `message_creation` or `tool_calls`.' - status: + description: The email address of the user + role: enum: - - in_progress - - cancelled - - failed - - completed - - expired + - owner + - member type: string - description: 'The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.' - step_details: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' - description: The details of the run step. - discriminator: - propertyName: type - mapping: - message_creation: '#/components/schemas/RunStepDetailsMessageCreationObject' - tool_calls: '#/components/schemas/RunStepDetailsToolCallsObject' - x-oaiExpandable: true - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - rate_limit_exceeded - type: string - description: One of `server_error` or `rate_limit_exceeded`. - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this run step. Will be `null` if there are no errors. - nullable: true - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. - nullable: true - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step was cancelled. - nullable: true - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step failed. 
- nullable: true - completed_at: + description: '`owner` or `member`' + added_at: type: integer - description: The Unix timestamp (in seconds) for when the run step completed. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - usage: - $ref: '#/components/schemas/RunStepCompletionUsage' - description: "Represents a step in execution of a run.\n" + description: The Unix timestamp (in seconds) of when the project was added. + description: Represents an individual user in a project. x-oaiMeta: - name: The run step object - beta: true - example: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" - RunStepDeltaObject: - title: Run step delta object + name: The project user object + example: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + ProjectUserCreateRequest: required: - - id - - object - - delta + - user_id + - role type: object properties: - id: + user_id: type: string - description: 'The identifier of the run step, which can be referenced in API endpoints.' + description: The ID of the user. + role: + enum: + - owner + - member + type: string + description: '`owner` or `member`' + ProjectUserDeleteResponse: + required: + - object + - id + - deleted + type: object + properties: object: enum: - - thread.run.step.delta + - organization.project.user.deleted type: string - description: 'The object type, which is always `thread.run.step.delta`.' - delta: - type: object - properties: - step_details: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsObject' - description: The details of the run step. - discriminator: - propertyName: type - mapping: - message_creation: '#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject' - tool_calls: '#/components/schemas/RunStepDeltaStepDetailsToolCallsObject' - x-oaiExpandable: true - description: The delta containing the fields that have changed on the run step. - description: "Represents a run step delta i.e. 
any changed fields on a run step during streaming.\n" - x-oaiMeta: - name: The run step delta object - beta: true - example: "{\n \"id\": \"step_123\",\n \"object\": \"thread.run.step.delta\",\n \"delta\": {\n \"step_details\": {\n \"type\": \"tool_calls\",\n \"tool_calls\": [\n {\n \"index\": 0,\n \"id\": \"call_123\",\n \"type\": \"code_interpreter\",\n \"code_interpreter\": { \"input\": \"\", \"outputs\": [] }\n }\n ]\n }\n }\n}\n" - ListRunStepsResponse: + id: + type: string + deleted: + type: boolean + ProjectUserListResponse: required: - object - data - first_id - last_id - has_more + type: object properties: object: type: string - example: list data: type: array items: - $ref: '#/components/schemas/RunStepObject' + $ref: '#/components/schemas/ProjectUser' first_id: type: string - example: step_abc123 last_id: type: string - example: step_abc456 has_more: type: boolean - example: false - RunStepDetailsMessageCreationObject: - title: Message creation + ProjectUserUpdateRequest: + required: + - role + type: object + properties: + role: + enum: + - owner + - member + type: string + description: '`owner` or `member`' + RealtimeClientEventConversationItemCreate: required: - type - - message_creation + - item type: object properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: enum: - - message_creation + - conversation.item.create type: string - description: Always `message_creation`. - message_creation: - required: - - message_id - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - description: Details of the message creation by the run step. - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation + description: 'The event type, must be `conversation.item.create`.' + previous_item_id: + type: string + description: "The ID of the preceding item after which the new item will be inserted. \nIf not set, the new item will be appended to the end of the conversation. \nIf set, it allows an item to be inserted mid-conversation. If the ID \ncannot be found, an error will be returned and the item will not be added.\n" + item: + $ref: '#/components/schemas/RealtimeConversationItem' + description: "Add a new Item to the Conversation's context, including messages, function \ncalls, and function call responses. This event can be used both to populate a \n\"history\" of the conversation and to add new items mid-stream, but has the \ncurrent limitation that it cannot populate assistant audio messages.\n\nIf successful, the server will respond with a `conversation.item.created` \nevent, otherwise an `error` event will be sent.\n" + x-oaiMeta: + name: conversation.item.create + group: realtime + example: "{\n \"event_id\": \"event_345\",\n \"type\": \"conversation.item.create\",\n \"previous_item_id\": null,\n \"item\": {\n \"id\": \"msg_001\",\n \"type\": \"message\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_text\",\n \"text\": \"Hello, how are you?\"\n }\n ]\n }\n}\n" + RealtimeClientEventConversationItemDelete: required: - type + - item_id type: object properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: enum: - - message_creation + - conversation.item.delete type: string - description: Always `message_creation`. - message_creation: - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. 
- description: Details of the message creation by the run step. - RunStepDetailsToolCallsObject: - title: Tool calls + description: 'The event type, must be `conversation.item.delete`.' + item_id: + type: string + description: The ID of the item to delete. + description: "Send this event when you want to remove any item from the conversation \nhistory. The server will respond with a `conversation.item.deleted` event, \nunless the item does not exist in the conversation history, in which case the \nserver will respond with an error.\n" + x-oaiMeta: + name: conversation.item.delete + group: realtime + example: "{\n \"event_id\": \"event_901\",\n \"type\": \"conversation.item.delete\",\n \"item_id\": \"msg_003\"\n}\n" + RealtimeClientEventConversationItemTruncate: required: - type - - tool_calls + - item_id + - content_index + - audio_end_ms type: object properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: enum: - - tool_calls + - conversation.item.truncate type: string - description: Always `tool_calls`. - tool_calls: - type: array - items: - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' - discriminator: - propertyName: type - mapping: - code_interpreter: '#/components/schemas/RunStepDetailsToolCallsCodeObject' - file_search: '#/components/schemas/RunStepDetailsToolCallsFileSearchObject' - function: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" - description: Details of the tool call. - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls + description: 'The event type, must be `conversation.item.truncate`.' + item_id: + type: string + description: "The ID of the assistant message item to truncate. Only assistant message \nitems can be truncated.\n" + content_index: + type: integer + description: The index of the content part to truncate. Set this to 0. + audio_end_ms: + type: integer + description: "Inclusive duration up to which audio is truncated, in milliseconds. If \nthe audio_end_ms is greater than the actual audio duration, the server \nwill respond with an error.\n" + description: "Send this event to truncate a previous assistant message’s audio. The server \nwill produce audio faster than realtime, so this event is useful when the user \ninterrupts to truncate audio that has already been sent to the client but not \nyet played. This will synchronize the server's understanding of the audio with \nthe client's playback.\n\nTruncating audio will delete the server-side text transcript to ensure there \nis not text in the context that hasn't been heard by the user.\n\nIf successful, the server will respond with a `conversation.item.truncated` \nevent. \n" + x-oaiMeta: + name: conversation.item.truncate + group: realtime + example: "{\n \"event_id\": \"event_678\",\n \"type\": \"conversation.item.truncate\",\n \"item_id\": \"msg_002\",\n \"content_index\": 0,\n \"audio_end_ms\": 1500\n}\n" + RealtimeClientEventInputAudioBufferAppend: required: - type + - audio type: object properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. 
type: enum: - - tool_calls + - input_audio_buffer.append type: string - description: Always `tool_calls`. - tool_calls: - type: array - items: - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject' - discriminator: - propertyName: type - mapping: - code_interpreter: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject' - file_search: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject' - function: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" - description: Details of the tool call. - RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call + description: 'The event type, must be `input_audio_buffer.append`.' + audio: + type: string + description: "Base64-encoded audio bytes. This must be in the format specified by the \n`input_audio_format` field in the session configuration.\n" + description: "Send this event to append audio bytes to the input audio buffer. The audio \nbuffer is temporary storage you can write to and later commit. In Server VAD \nmode, the audio buffer is used to detect speech and the server will decide \nwhen to commit. When Server VAD is disabled, you must commit the audio buffer\nmanually.\n\nThe client may choose how much audio to place in each event up to a maximum \nof 15 MiB, for example streaming smaller chunks from the client may allow the \nVAD to be more responsive. Unlike most other client events, the server will \nnot send a confirmation response to this event.\n" + x-oaiMeta: + name: input_audio_buffer.append + group: realtime + example: "{\n \"event_id\": \"event_456\",\n \"type\": \"input_audio_buffer.append\",\n \"audio\": \"Base64EncodedAudioData\"\n}\n" + RealtimeClientEventInputAudioBufferClear: required: - - id - type - - code_interpreter type: object properties: - id: + event_id: type: string - description: The ID of the tool call. + description: Optional client-generated ID used to identify this event. type: enum: - - code_interpreter + - input_audio_buffer.clear type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - code_interpreter: - required: - - input - - outputs - type: object - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - items: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' - discriminator: - propertyName: type - mapping: - logs: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' - image: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - description: 'The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. 
- RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call + description: 'The event type, must be `input_audio_buffer.clear`.' + description: "Send this event to clear the audio bytes in the buffer. The server will \nrespond with an `input_audio_buffer.cleared` event.\n" + x-oaiMeta: + name: input_audio_buffer.clear + group: realtime + example: "{\n \"event_id\": \"event_012\",\n \"type\": \"input_audio_buffer.clear\"\n}\n" + RealtimeClientEventInputAudioBufferCommit: required: - - index - type type: object properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: + event_id: type: string - description: The ID of the tool call. + description: Optional client-generated ID used to identify this event. type: enum: - - code_interpreter + - input_audio_buffer.commit type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - code_interpreter: - type: object - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - items: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' - discriminator: - propertyName: type - mapping: - logs: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' - image: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - description: 'The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output + description: 'The event type, must be `input_audio_buffer.commit`.' + description: "Send this event to commit the user input audio buffer, which will create a \nnew user message item in the conversation. This event will produce an error \nif the input audio buffer is empty. When in Server VAD mode, the client does \nnot need to send this event, the server will commit the audio buffer \nautomatically.\n\nCommitting the input audio buffer will trigger input audio transcription \n(if enabled in session configuration), but it will not create a response \nfrom the model. The server will respond with an `input_audio_buffer.committed` \nevent.\n" + x-oaiMeta: + name: input_audio_buffer.commit + group: realtime + example: "{\n \"event_id\": \"event_789\",\n \"type\": \"input_audio_buffer.commit\"\n}\n" + RealtimeClientEventResponseCancel: required: - type - - logs type: object properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: enum: - - logs + - response.cancel type: string - description: Always `logs`. - logs: + description: 'The event type, must be `response.cancel`.' + description: "Send this event to cancel an in-progress response. 
The server will respond \nwith a `response.cancelled` event or an error if there is no response to \ncancel.\n" + x-oaiMeta: + name: response.cancel + group: realtime + example: "{\n \"event_id\": \"event_567\",\n \"type\": \"response.cancel\"\n}\n" + RealtimeClientEventResponseCreate: + required: + - type + - response + type: object + properties: + event_id: type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output + description: Optional client-generated ID used to identify this event. + type: + enum: + - response.create + type: string + description: 'The event type, must be `response.create`.' + response: + $ref: '#/components/schemas/RealtimeSession' + description: "This event instructs the server to create a Response, which means triggering \nmodel inference. When in Server VAD mode, the server will create Responses \nautomatically.\n\nA Response will include at least one Item, and may have two, in which case \nthe second will be a function call. These Items will be appended to the \nconversation history.\n\nThe server will respond with a `response.created` event, events for Items \nand content created, and finally a `response.done` event to indicate the \nResponse is complete.\n\nThe `response.create` event includes inference configuration like \n`instructions`, and `temperature`. These fields will override the Session's \nconfiguration for this Response only.\n" + x-oaiMeta: + name: response.create + group: realtime + example: "{\n \"event_id\": \"event_234\",\n \"type\": \"response.create\",\n \"response\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"Please assist the user.\",\n \"voice\": \"sage\",\n \"output_audio_format\": \"pcm16\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"calculate_sum\",\n \"description\": \"Calculates the sum of two numbers.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"a\": { \"type\": \"number\" },\n \"b\": { \"type\": \"number\" }\n },\n \"required\": [\"a\", \"b\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.7,\n \"max_output_tokens\": 150\n }\n}\n" + RealtimeClientEventSessionUpdate: required: - - index - type + - session type: object properties: - index: - type: integer - description: The index of the output in the outputs array. + event_id: + type: string + description: Optional client-generated ID used to identify this event. type: enum: - - logs + - session.update + type: string + description: 'The event type, must be `session.update`.' + session: + $ref: '#/components/schemas/RealtimeSession' + description: "Send this event to update the session’s default configuration. The client may \nsend this event at any time to update the session configuration, and any \nfield may be updated at any time, except for \"voice\". The server will respond \nwith a `session.updated` event that shows the full effective configuration. 
\nOnly fields that are present are updated, thus the correct way to clear a \nfield like \"instructions\" is to pass an empty string.\n" + x-oaiMeta: + name: session.update + group: realtime + example: "{\n \"event_id\": \"event_123\",\n \"type\": \"session.update\",\n \"session\": {\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"You are a helpful assistant.\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 500\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather...\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": { \"type\": \"string\" }\n },\n \"required\": [\"location\"]\n }\n }\n ],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_response_output_tokens\": \"inf\"\n }\n}\n" + RealtimeConversationItem: + type: object + properties: + id: + type: string + description: "The unique ID of the item, this can be generated by the client to help \nmanage server-side context, but is not required because the server will \ngenerate one if not provided.\n" + type: + enum: + - message + - function_call + - function_call_output + type: string + description: "The type of the item (`message`, `function_call`, `function_call_output`).\n" + object: + enum: + - realtime.item + type: string + description: "Identifier for the API object being returned - always `realtime.item`.\n" + status: + enum: + - completed + - incomplete + type: string + description: "The status of the item (`completed`, `incomplete`). These have no effect \non the conversation, but are accepted for consistency with the \n`conversation.item.created` event.\n" + role: + enum: + - user + - assistant + - system + type: string + description: "The role of the message sender (`user`, `assistant`, `system`), only \napplicable for `message` items.\n" + content: + type: array + items: + type: object + properties: + type: + enum: + - input_audio + - input_text + - text + type: string + description: 'The content type (`input_text`, `input_audio`, `text`).' + text: + type: string + description: "The text content, used for `input_text` and `text` content types.\n" + audio: + type: string + description: "Base64-encoded audio bytes, used for `input_audio` content type.\n" + transcript: + type: string + description: "The transcript of the audio, used for `input_audio` content type.\n" + x-oaiExpandable: true + description: "The content of the message, applicable for `message` items. \n- Message items of role `system` support only `input_text` content\n- Message items of role `user` support `input_text` and `input_audio` \n content\n- Message items of role `assistant` support `text` content.\n" + x-oaiExpandable: true + call_id: type: string - description: Always `logs`. - logs: + description: "The ID of the function call (for `function_call` and \n`function_call_output` items). If passed on a `function_call_output` \nitem, the server will check that a `function_call` item with the same \nID exists in the conversation history.\n" + name: type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. 
- RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output - required: - - type - - image + description: "The name of the function being called (for `function_call` items).\n" + arguments: + type: string + description: "The arguments of the function call (for `function_call` items).\n" + output: + type: string + description: "The output of the function call (for `function_call_output` items).\n" + description: The item to add to the conversation. + x-oaiExpandable: true + RealtimeResponse: type: object properties: - type: + id: + type: string + description: The unique ID of the response. + object: enum: - - image + - realtime.response type: string - description: Always `image`. - image: - required: - - file_id + description: 'The object type, must be `realtime.response`.' + status: + enum: + - completed + - cancelled + - failed + - incomplete + type: string + description: "The final status of the response (`completed`, `cancelled`, `failed`, or \n`incomplete`).\n" + status_details: type: object properties: - file_id: + type: + enum: + - completed + - cancelled + - failed + - incomplete type: string - description: 'The [file](/docs/api-reference/files) ID of the image.' - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output + description: "The type of error that caused the response to fail, corresponding \nwith the `status` field (`cancelled`, `incomplete`, `failed`).\n" + reason: + enum: + - turn_detected + - client_cancelled + - max_output_tokens + - content_filter + type: string + description: "The reason the Response did not complete. For a `cancelled` Response, \none of `turn_detected` (the server VAD detected a new start of speech) \nor `client_cancelled` (the client sent a cancel event). For an \n`incomplete` Response, one of `max_output_tokens` or `content_filter` \n(the server-side safety filter activated and cut off the response).\n" + error: + type: object + properties: + type: + type: string + description: The type of error. + code: + type: string + description: 'Error code, if any.' + description: "A description of the error that caused the response to fail, \npopulated when the `status` is `failed`.\n" + description: Additional details about the status. + output: + type: array + items: + $ref: '#/components/schemas/RealtimeConversationItem' + description: The list of output items generated by the response. + usage: + type: object + properties: + total_tokens: + type: integer + description: "The total number of tokens in the Response including input and output \ntext and audio tokens.\n" + input_tokens: + type: integer + description: "The number of input tokens used in the Response, including text and \naudio tokens.\n" + output_tokens: + type: integer + description: "The number of output tokens sent in the Response, including text and \naudio tokens.\n" + input_token_details: + type: object + properties: + cached_tokens: + type: integer + description: The number of cached tokens used in the Response. + text_tokens: + type: integer + description: The number of text tokens used in the Response. + audio_tokens: + type: integer + description: The number of audio tokens used in the Response. + description: Details about the input tokens used in the Response. + output_token_details: + type: object + properties: + text_tokens: + type: integer + description: The number of text tokens used in the Response. + audio_tokens: + type: integer + description: The number of audio tokens used in the Response. 
+ description: Details about the output tokens used in the Response. + description: "Usage statistics for the Response, this will correspond to billing. A \nRealtime API session will maintain a conversation context and append new \nItems to the Conversation, thus output from previous turns (text and \naudio tokens) will become the input for later turns.\n" + description: The response resource. + RealtimeServerEventConversationCreated: required: - - index + - event_id - type + - conversation type: object properties: - index: - type: integer - description: The index of the output in the outputs array. + event_id: + type: string + description: The unique ID of the server event. type: enum: - - image + - conversation.created type: string - description: Always `image`. - image: + description: 'The event type, must be `conversation.created`.' + conversation: type: object properties: - file_id: + id: type: string - description: 'The [file](/docs/api-reference/files) ID of the image.' - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call + description: The unique ID of the conversation. + object: + type: string + description: 'The object type, must be `realtime.conversation`.' + description: The conversation resource. + description: "Returned when a conversation is created. Emitted right after session creation.\n" + x-oaiMeta: + name: conversation.created + group: realtime + example: "{\n \"event_id\": \"event_9101\",\n \"type\": \"conversation.created\",\n \"conversation\": {\n \"id\": \"conv_001\",\n \"object\": \"realtime.conversation\"\n }\n}\n" + RealtimeServerEventConversationItemCreated: required: - - id + - event_id - type - - file_search + - previous_item_id + - item type: object properties: - id: + event_id: type: string - description: The ID of the tool call object. + description: The unique ID of the server event. type: enum: - - file_search + - conversation.item.created type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - file_search: - type: object - description: 'For now, this is always going to be an empty object.' - x-oaiTypeLabel: map - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call + description: 'The event type, must be `conversation.item.created`.' + previous_item_id: + type: string + description: "The ID of the preceding item in the Conversation context, allows the \nclient to understand the order of the conversation.\n" + item: + $ref: '#/components/schemas/RealtimeConversationItem' + description: "Returned when a conversation item is created. There are several scenarios that \nproduce this event:\n - The server is generating a Response, which if successful will produce \n either one or two Items, which will be of type `message` \n (role `assistant`) or type `function_call`.\n - The input audio buffer has been committed, either by the client or the \n server (in `server_vad` mode). 
The server will take the content of the \n input audio buffer and add it to a new user message Item.\n - The client has sent a `conversation.item.create` event to add a new Item \n to the Conversation.\n" + x-oaiMeta: + name: conversation.item.created + group: realtime + example: "{\n \"event_id\": \"event_1920\",\n \"type\": \"conversation.item.created\",\n \"previous_item_id\": \"msg_002\",\n \"item\": {\n \"id\": \"msg_003\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_audio\",\n \"transcript\": \"hello how are you\",\n \"audio\": \"base64encodedaudio==\"\n }\n ]\n }\n}\n" + RealtimeServerEventConversationItemDeleted: required: - - index + - event_id - type - - file_search + - item_id type: object properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: + event_id: type: string - description: The ID of the tool call object. + description: The unique ID of the server event. type: enum: - - file_search + - conversation.item.deleted type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - file_search: - type: object - description: 'For now, this is always going to be an empty object.' - x-oaiTypeLabel: map - RunStepDetailsToolCallsFunctionObject: - title: Function tool call + description: 'The event type, must be `conversation.item.deleted`.' + item_id: + type: string + description: The ID of the item that was deleted. + description: "Returned when an item in the conversation is deleted by the client with a \n`conversation.item.delete` event. This event is used to synchronize the \nserver's understanding of the conversation history with the client's view.\n" + x-oaiMeta: + name: conversation.item.deleted + group: realtime + example: "{\n \"event_id\": \"event_2728\",\n \"type\": \"conversation.item.deleted\",\n \"item_id\": \"msg_005\"\n}\n" + RealtimeServerEventConversationItemInputAudioTranscriptionCompleted: required: - - id + - event_id - type - - function + - item_id + - content_index + - transcript type: object properties: - id: + event_id: type: string - description: The ID of the tool call object. + description: The unique ID of the server event. type: enum: - - function + - conversation.item.input_audio_transcription.completed type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - function: - required: - - name - - arguments - - output - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' - nullable: true - description: The definition of the function that was called. - RunStepDeltaStepDetailsToolCallsFunctionObject: - title: Function tool call + description: "The event type, must be\n`conversation.item.input_audio_transcription.completed`.\n" + item_id: + type: string + description: The ID of the user message item containing the audio. + content_index: + type: integer + description: The index of the content part containing the audio. + transcript: + type: string + description: The transcribed text. 
+ description: "This event is the output of audio transcription for user audio written to the \nuser audio buffer. Transcription begins when the input audio buffer is \ncommitted by the client or server (in `server_vad` mode). Transcription runs \nasynchronously with Response creation, so this event may come before or after \nthe Response events.\n\nRealtime API models accept audio natively, and thus input transcription is a \nseparate process run on a separate ASR (Automatic Speech Recognition) model, \ncurrently always `whisper-1`. Thus the transcript may diverge somewhat from \nthe model's interpretation, and should be treated as a rough guide.\n" + x-oaiMeta: + name: conversation.item.input_audio_transcription.completed + group: realtime + example: "{\n \"event_id\": \"event_2122\",\n \"type\": \"conversation.item.input_audio_transcription.completed\",\n \"item_id\": \"msg_003\",\n \"content_index\": 0,\n \"transcript\": \"Hello, how are you?\"\n}\n" + RealtimeServerEventConversationItemInputAudioTranscriptionFailed: required: - - index + - event_id - type + - item_id + - content_index + - error type: object properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: + event_id: type: string - description: The ID of the tool call object. + description: The unique ID of the server event. type: enum: - - function + - conversation.item.input_audio_transcription.failed type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - function: + description: "The event type, must be\n`conversation.item.input_audio_transcription.failed`.\n" + item_id: + type: string + description: The ID of the user message item. + content_index: + type: integer + description: The index of the content part containing the audio. + error: type: object properties: - name: + type: type: string - description: The name of the function. - arguments: + description: The type of error. + code: type: string - description: The arguments passed to the function. - output: + description: 'Error code, if any.' + message: type: string - description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' - nullable: true - description: The definition of the function that was called. - VectorStoreExpirationAfter: - title: Vector store expiration policy + description: A human-readable error message. + param: + type: string + description: 'Parameter related to the error, if any.' + description: Details of the transcription error. + description: "Returned when input audio transcription is configured, and a transcription \nrequest for a user message failed. These events are separate from other \n`error` events so that the client can identify the related Item.\n" + x-oaiMeta: + name: conversation.item.input_audio_transcription.failed + group: realtime + example: "{\n \"event_id\": \"event_2324\",\n \"type\": \"conversation.item.input_audio_transcription.failed\",\n \"item_id\": \"msg_003\",\n \"content_index\": 0,\n \"error\": {\n \"type\": \"transcription_error\",\n \"code\": \"audio_unintelligible\",\n \"message\": \"The audio could not be transcribed.\",\n \"param\": null\n }\n}\n" + RealtimeServerEventConversationItemTruncated: required: - - anchor - - days + - event_id + - type + - item_id + - content_index + - audio_end_ms type: object properties: - anchor: + event_id: + type: string + description: The unique ID of the server event. 
+ type: enum: - - last_active_at + - conversation.item.truncated type: string - description: 'Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.' - days: - maximum: 365 - minimum: 1 + description: 'The event type, must be `conversation.item.truncated`.' + item_id: + type: string + description: The ID of the assistant message item that was truncated. + content_index: type: integer - description: The number of days after the anchor time that the vector store will expire. - description: The expiration policy for a vector store. - VectorStoreObject: - title: Vector store + description: The index of the content part that was truncated. + audio_end_ms: + type: integer + description: "The duration up to which the audio was truncated, in milliseconds.\n" + description: "Returned when an earlier assistant audio message item is truncated by the \nclient with a `conversation.item.truncate` event. This event is used to \nsynchronize the server's understanding of the audio with the client's playback.\n\nThis action will truncate the audio and remove the server-side text transcript \nto ensure there is no text in the context that hasn't been heard by the user.\n" + x-oaiMeta: + name: conversation.item.truncated + group: realtime + example: "{\n \"event_id\": \"event_2526\",\n \"type\": \"conversation.item.truncated\",\n \"item_id\": \"msg_004\",\n \"content_index\": 0,\n \"audio_end_ms\": 1500\n}\n" + RealtimeServerEventError: required: - - id - - object - - usage_bytes - - created_at - - status - - last_active_at - - name - - file_counts - - metadata + - event_id + - type + - error type: object properties: - id: + event_id: type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: + description: The unique ID of the server event. + type: enum: - - vector_store - type: string - description: 'The object type, which is always `vector_store`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store was created. - name: + - error type: string - description: The name of the vector store. - usage_bytes: - type: integer - description: The total number of bytes used by the files in the vector store. - file_counts: - required: - - in_progress - - completed - - failed - - cancelled - - total + description: 'The event type, must be `error`.' + error: type: object properties: - in_progress: - type: integer - description: The number of files that are currently being processed. - completed: - type: integer - description: The number of files that have been successfully processed. - failed: - type: integer - description: The number of files that have failed to process. - cancelled: - type: integer - description: The number of files that were cancelled. - total: - type: integer - description: The total number of files. - status: + type: + type: string + description: "The type of error (e.g., \"invalid_request_error\", \"server_error\").\n" + code: + type: string + description: 'Error code, if any.' + message: + type: string + description: A human-readable error message. + param: + type: string + description: 'Parameter related to the error, if any.' + event_id: + type: string + description: "The event_id of the client event that caused the error, if applicable.\n" + description: Details of the error. + description: "Returned when an error occurs, which could be a client problem or a server \nproblem. 
Most errors are recoverable and the session will stay open, we \nrecommend to implementors to monitor and log error messages by default.\n" + x-oaiMeta: + name: error + group: realtime + example: "{\n \"event_id\": \"event_890\",\n \"type\": \"error\",\n \"error\": {\n \"type\": \"invalid_request_error\",\n \"code\": \"invalid_event\",\n \"message\": \"The 'type' field is missing.\",\n \"param\": null,\n \"event_id\": \"event_567\"\n }\n}\n" + RealtimeServerEventInputAudioBufferCleared: + required: + - event_id + - type + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: enum: - - expired - - in_progress - - completed + - input_audio_buffer.cleared type: string - description: 'The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.' - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store will expire. - nullable: true - last_active_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store was last active. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: A vector store is a collection of processed files can be used by the `file_search` tool. + description: 'The event type, must be `input_audio_buffer.cleared`.' + description: "Returned when the input audio buffer is cleared by the client with a \n`input_audio_buffer.clear` event.\n" x-oaiMeta: - name: The vector store object - beta: true - example: "{\n \"id\": \"vs_123\",\n \"object\": \"vector_store\",\n \"created_at\": 1698107661,\n \"usage_bytes\": 123456,\n \"last_active_at\": 1698107661,\n \"name\": \"my_vector_store\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"cancelled\": 0,\n \"failed\": 0,\n \"total\": 100\n },\n \"metadata\": {},\n \"last_used_at\": 1698107661\n}\n" - CreateVectorStoreRequest: + name: input_audio_buffer.cleared + group: realtime + example: "{\n \"event_id\": \"event_1314\",\n \"type\": \"input_audio_buffer.cleared\"\n}\n" + RealtimeServerEventInputAudioBufferCommitted: + required: + - event_id + - type + - previous_item_id + - item_id type: object properties: - file_ids: - maxItems: 500 - type: array - items: - type: string - description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' - name: + event_id: type: string - description: The name of the vector store. - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - chunking_strategy: - type: object - oneOf: - - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' - - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty.' 
- discriminator: - propertyName: type - mapping: - auto: '#/components/schemas/AutoChunkingStrategyRequestParam' - static: '#/components/schemas/StaticChunkingStrategyRequestParam' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - UpdateVectorStoreRequest: + description: The unique ID of the server event. + type: + enum: + - input_audio_buffer.committed + type: string + description: 'The event type, must be `input_audio_buffer.committed`.' + previous_item_id: + type: string + description: "The ID of the preceding item after which the new item will be inserted.\n" + item_id: + type: string + description: The ID of the user message item that will be created. + description: "Returned when an input audio buffer is committed, either by the client or \nautomatically in server VAD mode. The `item_id` property is the ID of the user\nmessage item that will be created, thus a `conversation.item.created` event \nwill also be sent to the client.\n" + x-oaiMeta: + name: input_audio_buffer.committed + group: realtime + example: "{\n \"event_id\": \"event_1121\",\n \"type\": \"input_audio_buffer.committed\",\n \"previous_item_id\": \"msg_001\",\n \"item_id\": \"msg_002\"\n}\n" + RealtimeServerEventInputAudioBufferSpeechStarted: + required: + - event_id + - type + - audio_start_ms + - item_id type: object properties: - name: + event_id: type: string - description: The name of the vector store. - nullable: true - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ListVectorStoresResponse: + description: The unique ID of the server event. + type: + enum: + - input_audio_buffer.speech_started + type: string + description: 'The event type, must be `input_audio_buffer.speech_started`.' + audio_start_ms: + type: integer + description: "Milliseconds from the start of all audio written to the buffer during the \nsession when speech was first detected. This will correspond to the \nbeginning of audio sent to the model, and thus includes the \n`prefix_padding_ms` configured in the Session.\n" + item_id: + type: string + description: "The ID of the user message item that will be created when speech stops.\n" + description: "Sent by the server when in `server_vad` mode to indicate that speech has been \ndetected in the audio buffer. This can happen any time audio is added to the \nbuffer (unless speech is already detected). The client may want to use this \nevent to interrupt audio playback or provide visual feedback to the user. \n\nThe client should expect to receive a `input_audio_buffer.speech_stopped` event \nwhen speech stops. 
The `item_id` property is the ID of the user message item \nthat will be created when speech stops and will also be included in the \n`input_audio_buffer.speech_stopped` event (unless the client manually commits \nthe audio buffer during VAD activation).\n" + x-oaiMeta: + name: input_audio_buffer.speech_started + group: realtime + example: "{\n \"event_id\": \"event_1516\",\n \"type\": \"input_audio_buffer.speech_started\",\n \"audio_start_ms\": 1000,\n \"item_id\": \"msg_003\"\n}\n" + RealtimeServerEventInputAudioBufferSpeechStopped: required: - - object - - data - - first_id - - last_id - - has_more + - event_id + - type + - audio_end_ms + - item_id + type: object properties: - object: + event_id: type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreObject' - first_id: + description: The unique ID of the server event. + type: + enum: + - input_audio_buffer.speech_stopped type: string - example: vs_abc123 - last_id: + description: 'The event type, must be `input_audio_buffer.speech_stopped`.' + audio_end_ms: + type: integer + description: "Milliseconds since the session started when speech stopped. This will \ncorrespond to the end of audio sent to the model, and thus includes the \n`min_silence_duration_ms` configured in the Session.\n" + item_id: type: string - example: vs_abc456 - has_more: - type: boolean - example: false - DeleteVectorStoreResponse: + description: The ID of the user message item that will be created. + description: "Returned in `server_vad` mode when the server detects the end of speech in \nthe audio buffer. The server will also send an `conversation.item.created` \nevent with the user message item that is created from the audio buffer.\n" + x-oaiMeta: + name: input_audio_buffer.speech_stopped + group: realtime + example: "{\n \"event_id\": \"event_1718\",\n \"type\": \"input_audio_buffer.speech_stopped\",\n \"audio_end_ms\": 2000,\n \"item_id\": \"msg_003\"\n}\n" + RealtimeServerEventRateLimitsUpdated: required: - - id - - object - - deleted + - event_id + - type + - rate_limits type: object properties: - id: + event_id: type: string - deleted: - type: boolean - object: + description: The unique ID of the server event. + type: enum: - - vector_store.deleted + - rate_limits.updated type: string - VectorStoreFileObject: - title: Vector store files + description: 'The event type, must be `rate_limits.updated`.' + rate_limits: + type: array + items: + type: object + properties: + name: + type: string + description: 'The name of the rate limit (`requests`, `tokens`).' + limit: + type: integer + description: The maximum allowed value for the rate limit. + remaining: + type: integer + description: The remaining value before the limit is reached. + reset_seconds: + type: number + description: Seconds until the rate limit resets. + description: List of rate limit information. + description: "Emitted at the beginning of a Response to indicate the updated rate limits. 
\nWhen a Response is created some tokens will be \"reserved\" for the output \ntokens, the rate limits shown here reflect that reservation, which is then \nadjusted accordingly once the Response is completed.\n" + x-oaiMeta: + name: rate_limits.updated + group: realtime + example: "{\n \"event_id\": \"event_5758\",\n \"type\": \"rate_limits.updated\",\n \"rate_limits\": [\n {\n \"name\": \"requests\",\n \"limit\": 1000,\n \"remaining\": 999,\n \"reset_seconds\": 60\n },\n {\n \"name\": \"tokens\",\n \"limit\": 50000,\n \"remaining\": 49950,\n \"reset_seconds\": 60\n }\n ]\n}\n" + RealtimeServerEventResponseAudioDelta: required: - - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta type: object properties: - id: + event_id: type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: + description: The unique ID of the server event. + type: enum: - - vector_store.file + - response.audio.delta type: string - description: 'The object type, which is always `vector_store.file`.' - usage_bytes: + description: 'The event type, must be `response.audio.delta`.' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: type: integer - description: The total vector store usage in bytes. Note that this may be different from the original file size. - created_at: + description: The index of the output item in the response. + content_index: type: integer - description: The Unix timestamp (in seconds) for when the vector store file was created. - vector_store_id: + description: The index of the content part in the item's content array. + delta: type: string - description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' - status: + description: Base64-encoded audio data delta. + description: Returned when the model-generated audio is updated. + x-oaiMeta: + name: response.audio.delta + group: realtime + example: "{\n \"event_id\": \"event_4950\",\n \"type\": \"response.audio.delta\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_008\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"Base64EncodedAudioDelta\"\n}\n" + RealtimeServerEventResponseAudioDone: + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: enum: - - in_progress - - completed - - cancelled - - failed + - response.audio.done type: string - description: 'The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use.' - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - unsupported_file - - invalid_file - type: string - description: One of `server_error` or `rate_limit_exceeded`. - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this vector store file. Will be `null` if there are no errors. 
- nullable: true - chunking_strategy: - type: object - oneOf: - - $ref: '#/components/schemas/StaticChunkingStrategyResponseParam' - - $ref: '#/components/schemas/OtherChunkingStrategyResponseParam' - description: The strategy used to chunk the file. - discriminator: - propertyName: type - mapping: - static: '#/components/schemas/StaticChunkingStrategyResponseParam' - other: '#/components/schemas/OtherChunkingStrategyResponseParam' - x-oaiExpandable: true - description: A list of files attached to a vector store. + description: 'The event type, must be `response.audio.done`.' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + description: "Returned when the model-generated audio is done. Also emitted when a Response\nis interrupted, incomplete, or cancelled.\n" x-oaiMeta: - name: The vector store file object - beta: true - example: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"usage_bytes\": 1234,\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"last_error\": null,\n \"chunking_strategy\": {\n \"type\": \"static\",\n \"static\": {\n \"max_chunk_size_tokens\": 800,\n \"chunk_overlap_tokens\": 400\n }\n }\n}\n" - OtherChunkingStrategyResponseParam: - title: Other Chunking Strategy + name: response.audio.done + group: realtime + example: "{\n \"event_id\": \"event_5152\",\n \"type\": \"response.audio.done\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_008\",\n \"output_index\": 0,\n \"content_index\": 0\n}\n" + RealtimeServerEventResponseAudioTranscriptDelta: required: + - event_id - type + - response_id + - item_id + - output_index + - content_index + - delta type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: enum: - - other + - response.audio_transcript.delta type: string - description: Always `other`. - additionalProperties: false - description: 'This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.' - StaticChunkingStrategyResponseParam: - title: Static Chunking Strategy + description: 'The event type, must be `response.audio_transcript.delta`.' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: The transcript delta. 
+ description: "Returned when the model-generated transcription of audio output is updated.\n" + x-oaiMeta: + name: response.audio_transcript.delta + group: realtime + example: "{\n \"event_id\": \"event_4546\",\n \"type\": \"response.audio_transcript.delta\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_008\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"Hello, how can I a\"\n}\n" + RealtimeServerEventResponseAudioTranscriptDone: required: + - event_id - type - - static + - response_id + - item_id + - output_index + - content_index + - transcript type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: enum: - - static + - response.audio_transcript.done type: string - description: Always `static`. - static: - $ref: '#/components/schemas/StaticChunkingStrategy' - additionalProperties: false - StaticChunkingStrategy: + description: 'The event type, must be `response.audio_transcript.done`.' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + transcript: + type: string + description: The final transcript of the audio. + description: "Returned when the model-generated transcription of audio output is done\nstreaming. Also emitted when a Response is interrupted, incomplete, or\ncancelled.\n" + x-oaiMeta: + name: response.audio_transcript.done + group: realtime + example: "{\n \"event_id\": \"event_4748\",\n \"type\": \"response.audio_transcript.done\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_008\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"transcript\": \"Hello, how can I assist you today?\"\n}\n" + RealtimeServerEventResponseContentPartAdded: + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - part + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + enum: + - response.content_part.added + type: string + description: 'The event type, must be `response.content_part.added`.' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item to which the content part was added. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + part: + type: object + properties: + type: + enum: + - audio + - text + type: string + description: 'The content type ("text", "audio").' + text: + type: string + description: The text content (if type is "text"). + audio: + type: string + description: Base64-encoded audio data (if type is "audio"). + transcript: + type: string + description: The transcript of the audio (if type is "audio"). + description: The content part that was added. 
+ description: "Returned when a new content part is added to an assistant message item during\nresponse generation.\n" + x-oaiMeta: + name: response.content_part.added + group: realtime + example: "{\n \"event_id\": \"event_3738\",\n \"type\": \"response.content_part.added\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_007\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"part\": {\n \"type\": \"text\",\n \"text\": \"\"\n }\n}\n" + RealtimeServerEventResponseContentPartDone: required: - - max_chunk_size_tokens - - chunk_overlap_tokens + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - part type: object properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 + event_id: + type: string + description: The unique ID of the server event. + type: + enum: + - response.content_part.done + type: string + description: 'The event type, must be `response.content_part.done`.' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: + description: The index of the output item in the response. + content_index: type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - AutoChunkingStrategyRequestParam: - title: Auto Chunking Strategy + description: The index of the content part in the item's content array. + part: + type: object + properties: + type: + type: string + description: 'The content type ("text", "audio").' + text: + type: string + description: The text content (if type is "text"). + audio: + type: string + description: Base64-encoded audio data (if type is "audio"). + transcript: + type: string + description: The transcript of the audio (if type is "audio"). + description: The content part that is done. + description: "Returned when a content part is done streaming in an assistant message item.\nAlso emitted when a Response is interrupted, incomplete, or cancelled.\n" + x-oaiMeta: + name: response.content_part.done + group: realtime + example: "{\n \"event_id\": \"event_3940\",\n \"type\": \"response.content_part.done\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_007\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"part\": {\n \"type\": \"text\",\n \"text\": \"Sure, I can help with that.\"\n }\n}\n" + RealtimeServerEventResponseCreated: required: + - event_id - type + - response type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: enum: - - auto + - response.created type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - StaticChunkingStrategyRequestParam: - title: Static Chunking Strategy + description: 'The event type, must be `response.created`.' + response: + $ref: '#/components/schemas/RealtimeResponse' + description: "Returned when a new Response is created. 
The first event of response creation,\nwhere the response is in an initial state of `in_progress`.\n" + x-oaiMeta: + name: response.created + group: realtime + example: "{\n \"event_id\": \"event_2930\",\n \"type\": \"response.created\",\n \"response\": {\n \"id\": \"resp_001\",\n \"object\": \"realtime.response\",\n \"status\": \"in_progress\",\n \"status_details\": null,\n \"output\": [],\n \"usage\": null\n }\n}\n" + RealtimeServerEventResponseDone: required: + - event_id - type - - static + - response type: object properties: + event_id: + type: string + description: The unique ID of the server event. type: enum: - - static + - response.done type: string - description: Always `static`. - static: - $ref: '#/components/schemas/StaticChunkingStrategy' - additionalProperties: false - ChunkingStrategyRequestParam: - type: object - oneOf: - - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' - - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - discriminator: - propertyName: type - mapping: - auto: '#/components/schemas/AutoChunkingStrategyRequestParam' - static: '#/components/schemas/StaticChunkingStrategyRequestParam' - x-oaiExpandable: true - CreateVectorStoreFileRequest: + description: 'The event type, must be `response.done`.' + response: + $ref: '#/components/schemas/RealtimeResponse' + description: "Returned when a Response is done streaming. Always emitted, no matter the \nfinal state. The Response object included in the `response.done` event will \ninclude all output Items in the Response but will omit the raw audio data.\n" + x-oaiMeta: + name: response.done + group: realtime + example: "{\n \"event_id\": \"event_3132\",\n \"type\": \"response.done\",\n \"response\": {\n \"id\": \"resp_001\",\n \"object\": \"realtime.response\",\n \"status\": \"completed\",\n \"status_details\": null,\n \"output\": [\n {\n \"id\": \"msg_006\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"Sure, how can I assist you today?\"\n }\n ]\n }\n ],\n \"usage\": {\n \"total_tokens\":275,\n \"input_tokens\":127,\n \"output_tokens\":148,\n \"input_token_details\": {\n \"cached_tokens\":384,\n \"text_tokens\":119,\n \"audio_tokens\":8,\n \"cached_tokens_details\": {\n \"text_tokens\": 128,\n \"audio_tokens\": 256\n }\n },\n \"output_token_details\": {\n \"text_tokens\":36,\n \"audio_tokens\":112\n }\n }\n }\n}\n" + RealtimeServerEventResponseFunctionCallArgumentsDelta: required: - - file_id + - event_id + - type + - response_id + - item_id + - output_index + - call_id + - delta type: object properties: - file_id: + event_id: type: string - description: 'A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.' - chunking_strategy: - $ref: '#/components/schemas/ChunkingStrategyRequestParam' - additionalProperties: false - ListVectorStoreFilesResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: + description: The unique ID of the server event. 
+ type: + enum: + - response.function_call_arguments.delta type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreFileObject' - first_id: + description: "The event type, must be `response.function_call_arguments.delta`.\n" + response_id: type: string - example: file-abc123 - last_id: + description: The ID of the response. + item_id: type: string - example: file-abc456 - has_more: - type: boolean - example: false - DeleteVectorStoreFileResponse: - required: - - id - - object - - deleted - type: object - properties: - id: + description: The ID of the function call item. + output_index: + type: integer + description: The index of the output item in the response. + call_id: type: string - deleted: - type: boolean - object: - enum: - - vector_store.file.deleted + description: The ID of the function call. + delta: type: string - VectorStoreFileBatchObject: - title: Vector store file batch + description: The arguments delta as a JSON string. + description: "Returned when the model-generated function call arguments are updated.\n" + x-oaiMeta: + name: response.function_call_arguments.delta + group: realtime + example: "{\n \"event_id\": \"event_5354\",\n \"type\": \"response.function_call_arguments.delta\",\n \"response_id\": \"resp_002\",\n \"item_id\": \"fc_001\",\n \"output_index\": 0,\n \"call_id\": \"call_001\",\n \"delta\": \"{\\\"location\\\": \\\"San\\\"\"\n}\n" + RealtimeServerEventResponseFunctionCallArgumentsDone: required: - - id - - object - - created_at - - vector_store_id - - status - - file_counts + - event_id + - type + - response_id + - item_id + - output_index + - call_id + - arguments type: object properties: - id: + event_id: type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: + description: The unique ID of the server event. + type: enum: - - vector_store.files_batch + - response.function_call_arguments.done type: string - description: 'The object type, which is always `vector_store.file_batch`.' - created_at: + description: "The event type, must be `response.function_call_arguments.done`.\n" + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the function call item. + output_index: type: integer - description: The Unix timestamp (in seconds) for when the vector store files batch was created. - vector_store_id: + description: The index of the output item in the response. + call_id: type: string - description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' - status: - enum: - - in_progress - - completed - - cancelled - - failed + description: The ID of the function call. + arguments: type: string - description: 'The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`.' - file_counts: - required: - - in_progress - - completed - - cancelled - - failed - - total - type: object - properties: - in_progress: - type: integer - description: The number of files that are currently being processed. - completed: - type: integer - description: The number of files that have been processed. - failed: - type: integer - description: The number of files that have failed to process. - cancelled: - type: integer - description: The number of files that where cancelled. - total: - type: integer - description: The total number of files. - description: A batch of files attached to a vector store. 
+ description: The final arguments as a JSON string. + description: "Returned when the model-generated function call arguments are done streaming.\nAlso emitted when a Response is interrupted, incomplete, or cancelled.\n" x-oaiMeta: - name: The vector store files batch object - beta: true - example: "{\n \"id\": \"vsfb_123\",\n \"object\": \"vector_store.files_batch\",\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 100\n }\n}\n" - CreateVectorStoreFileBatchRequest: + name: response.function_call_arguments.done + group: realtime + example: "{\n \"event_id\": \"event_5556\",\n \"type\": \"response.function_call_arguments.done\",\n \"response_id\": \"resp_002\",\n \"item_id\": \"fc_001\",\n \"output_index\": 0,\n \"call_id\": \"call_001\",\n \"arguments\": \"{\\\"location\\\": \\\"San Francisco\\\"}\"\n}\n" + RealtimeServerEventResponseOutputItemAdded: required: - - file_ids + - event_id + - type + - response_id + - output_index + - item type: object properties: - file_ids: - maxItems: 500 - minItems: 1 - type: array - items: - type: string - description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' - chunking_strategy: - $ref: '#/components/schemas/ChunkingStrategyRequestParam' - additionalProperties: false - AssistantStreamEvent: - oneOf: - - $ref: '#/components/schemas/ErrorEvent' - - $ref: '#/components/schemas/DoneEvent' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.created - type: string - data: - $ref: '#/components/schemas/ThreadObject' - description: 'Occurs when a new [thread](/docs/api-reference/threads/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [thread](/docs/api-reference/threads/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.created - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a new [run](/docs/api-reference/runs/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.queued - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.in_progress - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.requires_action - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data + event_id: + type: string + description: The unique ID of the server event. 
+ type: + enum: + - response.output_item.added + type: string + description: 'The event type, must be `response.output_item.added`.' + response_id: + type: string + description: The ID of the Response to which the item belongs. + output_index: + type: integer + description: The index of the output item in the Response. + item: + $ref: '#/components/schemas/RealtimeConversationItem' + description: Returned when a new Item is created during Response generation. + x-oaiMeta: + name: response.output_item.added + group: realtime + example: "{\n \"event_id\": \"event_3334\",\n \"type\": \"response.output_item.added\",\n \"response_id\": \"resp_001\",\n \"output_index\": 0,\n \"item\": {\n \"id\": \"msg_007\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"in_progress\",\n \"role\": \"assistant\",\n \"content\": []\n }\n}\n" + RealtimeServerEventResponseOutputItemDone: + required: + - event_id + - type + - response_id + - output_index + - item + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + enum: + - response.output_item.done + type: string + description: 'The event type, must be `response.output_item.done`.' + response_id: + type: string + description: The ID of the Response to which the item belongs. + output_index: + type: integer + description: The index of the output item in the Response. + item: + $ref: '#/components/schemas/RealtimeConversationItem' + description: "Returned when an Item is done streaming. Also emitted when a Response is \ninterrupted, incomplete, or cancelled.\n" + x-oaiMeta: + name: response.output_item.done + group: realtime + example: "{\n \"event_id\": \"event_3536\",\n \"type\": \"response.output_item.done\",\n \"response_id\": \"resp_001\",\n \"output_index\": 0,\n \"item\": {\n \"id\": \"msg_007\",\n \"object\": \"realtime.item\",\n \"type\": \"message\",\n \"status\": \"completed\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"Sure, I can help with that.\"\n }\n ]\n }\n}\n" + RealtimeServerEventResponseTextDelta: + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + enum: + - response.text.delta + type: string + description: 'The event type, must be `response.text.delta`.' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: The text delta. + description: Returned when the text value of a "text" content part is updated. + x-oaiMeta: + name: response.text.delta + group: realtime + example: "{\n \"event_id\": \"event_4142\",\n \"type\": \"response.text.delta\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_007\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"Sure, I can h\"\n}\n" + RealtimeServerEventResponseTextDone: + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - text + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. 
+ type: + enum: + - response.text.done + type: string + description: 'The event type, must be `response.text.done`.' + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + text: + type: string + description: The final text content. + description: "Returned when the text value of a \"text\" content part is done streaming. Also\nemitted when a Response is interrupted, incomplete, or cancelled.\n" + x-oaiMeta: + name: response.text.done + group: realtime + example: "{\n \"event_id\": \"event_4344\",\n \"type\": \"response.text.done\",\n \"response_id\": \"resp_001\",\n \"item_id\": \"msg_007\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"text\": \"Sure, I can help with that.\"\n}\n" + RealtimeServerEventSessionCreated: + required: + - event_id + - type + - session + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + enum: + - session.created + type: string + description: 'The event type, must be `session.created`.' + session: + $ref: '#/components/schemas/RealtimeSession' + description: "Returned when a Session is created. Emitted automatically when a new \nconnection is established as the first server event. This event will contain \nthe default Session configuration.\n" + x-oaiMeta: + name: session.created + group: realtime + example: "{\n \"event_id\": \"event_1234\",\n \"type\": \"session.created\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\", \"audio\"],\n \"instructions\": \"...model instructions here...\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": null,\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"tools\": [],\n \"tool_choice\": \"auto\",\n \"temperature\": 0.8,\n \"max_response_output_tokens\": \"inf\"\n }\n}\n" + RealtimeServerEventSessionUpdated: + required: + - event_id + - type + - session + type: object + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + enum: + - session.updated + type: string + description: 'The event type, must be `session.updated`.' 
+ session: + $ref: '#/components/schemas/RealtimeSession' + description: "Returned when a session is updated with a `session.update` event, unless \nthere is an error.\n" + x-oaiMeta: + name: session.updated + group: realtime + example: "{\n \"event_id\": \"event_5678\",\n \"type\": \"session.updated\",\n \"session\": {\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-4o-realtime-preview-2024-10-01\",\n \"modalities\": [\"text\"],\n \"instructions\": \"New instructions\",\n \"voice\": \"sage\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": null,\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"max_response_output_tokens\": 200\n }\n}\n" + RealtimeSession: + type: object + properties: + modalities: + items: + enum: + - text + - audio + type: string + description: "The set of modalities the model can respond with. To disable audio,\nset this to [\"text\"].\n" + instructions: + type: string + description: "The default system instructions (i.e. system message) prepended to model \ncalls. This field allows the client to guide the model on desired \nresponses. The model can be instructed on response content and format, \n(e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good \nresponses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion \ninto your voice\", \"laugh frequently\"). The instructions are not guaranteed \nto be followed by the model, but they provide guidance to the model on the \ndesired behavior.\n\nNote that the server sets default instructions which will be used if this \nfield is not set and are visible in the `session.created` event at the \nstart of the session.\n" + voice: + enum: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + type: string + description: "The voice the model uses to respond. Current voice options are `ash`, \n`ballad`, `coral`, `sage`, and `verse`. \n\nAlso supported but not recommended are `alloy`, `echo`, and `shimmer`. \nThese older voices are less expressive. \n\nVoice cannot be changed during the session once the model has \nresponded with audio at least once.\n" + input_audio_format: + type: string + description: "The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n" + output_audio_format: + type: string + description: "The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n" + input_audio_transcription: type: object properties: - event: - enum: - - thread.run.completed + model: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data + description: "The model to use for transcription, `whisper-1` is the only currently \nsupported model.\n" + description: "Configuration for input audio transcription, defaults to off and can be \nset to `null` to turn off once on. Input audio transcription is not native \nto the model, since the model consumes audio directly. 
Transcription runs \nasynchronously through Whisper and should be treated as rough guidance \nrather than the representation understood by the model.\n" + turn_detection: type: object properties: - event: - enum: - - thread.run.incomplete + type: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.failed + description: "Type of turn detection, only `server_vad` is currently supported.\n" + threshold: + type: number + description: "Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A \nhigher threshold will require louder audio to activate the model, and \nthus might perform better in noisy environments.\n" + prefix_padding_ms: + type: integer + description: "Amount of audio to include before the VAD detected speech (in \nmilliseconds). Defaults to 300ms.\n" + silence_duration_ms: + type: integer + description: "Duration of silence to detect speech stop (in milliseconds). Defaults \nto 500ms. With shorter values the model will respond more quickly, \nbut may jump in on short pauses from the user.\n" + description: "Configuration for turn detection. Can be set to `null` to turn off. Server \nVAD means that the model will detect the start and end of speech based on \naudio volume and respond at the end of user speech.\n" + tools: + type: array + items: + type: object + properties: + type: + enum: + - function + type: string + description: 'The type of the tool, i.e. `function`.' + name: + type: string + description: The name of the function. + description: + type: string + description: "The description of the function, including guidance on when and how \nto call it, and guidance about what to tell the user when calling \n(if anything).\n" + parameters: + type: object + description: Parameters of the function in JSON Schema. + description: Tools (functions) available to the model. + tool_choice: + type: string + description: "How the model chooses tools. Options are `auto`, `none`, `required`, or \nspecify a function.\n" + temperature: + type: number + description: "Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.\n" + max_response_output_tokens: + oneOf: + - type: integer + - enum: + - inf type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) fails.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data + description: "Maximum number of output tokens for a single assistant response,\ninclusive of tool calls. Provide an integer between 1 and 4096 to\nlimit output tokens, or `inf` for the maximum available tokens for a\ngiven model. Defaults to `inf`.\n" + description: Realtime session object configuration. 
+ ResponseFormatJsonObject: + required: + - type + type: object + properties: + type: + enum: + - json_object + type: string + description: 'The type of response format being defined: `json_object`' + ResponseFormatJsonSchema: + required: + - type + - json_schema + type: object + properties: + type: + enum: + - json_schema + type: string + description: 'The type of response format being defined: `json_schema`' + json_schema: + required: + - type + - name type: object properties: - event: - enum: - - thread.run.cancelling + description: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.cancelled + description: 'A description of what the response format is for, used by the model to determine how to respond in the format.' + name: type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) is cancelled.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data + description: 'The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' + schema: + $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' + strict: + type: boolean + description: 'Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs).' + default: false + nullable: true + ResponseFormatJsonSchemaSchema: + type: object + description: 'The schema for the response format, described as a JSON Schema object.' + ResponseFormatText: + required: + - type + type: object + properties: + type: + enum: + - text + type: string + description: 'The type of response format being defined: `text`' + RunCompletionUsage: + required: + - prompt_tokens + - completion_tokens + - total_tokens + type: object + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + description: 'Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.).' + nullable: true + RunObject: + title: A run on a thread + required: + - id + - object + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - metadata + - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - parallel_tool_calls + - response_format + type: object + properties: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' + object: + enum: + - thread.run + type: string + description: 'The object type, which is always `thread.run`.' 
+ created_at: + type: integer + description: The Unix timestamp (in seconds) for when the run was created. + thread_id: + type: string + description: 'The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run.' + assistant_id: + type: string + description: 'The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run.' + status: + enum: + - queued + - in_progress + - requires_action + - cancelling + - cancelled + - failed + - completed + - incomplete + - expired + type: string + description: 'The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`.' + required_action: + required: + - type + - submit_tool_outputs type: object properties: - event: + type: enum: - - thread.run.expired + - submit_tool_outputs type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) expires.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data + description: 'For now, this is always `submit_tool_outputs`.' + submit_tool_outputs: + required: + - tool_calls + type: object + properties: + tool_calls: + type: array + items: + $ref: '#/components/schemas/RunToolCallObject' + description: A list of the relevant tool calls. + description: Details on the tool outputs needed for this run to continue. + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true + last_error: + required: + - code + - message type: object properties: - event: + code: enum: - - thread.run.step.created + - server_error + - rate_limit_exceeded + - invalid_prompt type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.in_progress + description: 'One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.' + message: type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data + description: A human-readable description of the error. + description: The last error associated with this run. Will be `null` if there are no errors. + nullable: true + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the run will expire. + nullable: true + started_at: + type: integer + description: The Unix timestamp (in seconds) for when the run was started. + nullable: true + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the run was cancelled. + nullable: true + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the run failed. + nullable: true + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the run was completed. 
+ nullable: true + incomplete_details: type: object properties: - event: + reason: enum: - - thread.run.step.delta + - max_completion_tokens + - max_prompt_tokens type: string - data: - $ref: '#/components/schemas/RunStepDeltaObject' - description: 'Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed.' - x-oaiMeta: - dataDescription: '`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)' - - required: - - event - - data + description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. + nullable: true + model: + type: string + description: 'The model that the [assistant](/docs/api-reference/assistants) used for this run.' + instructions: + type: string + description: 'The instructions that the [assistant](/docs/api-reference/assistants) used for this run.' + tools: + maxItems: 20 + type: array + items: + oneOf: + - $ref: '#/components/schemas/AssistantToolsCode' + - $ref: '#/components/schemas/AssistantToolsFileSearch' + - $ref: '#/components/schemas/AssistantToolsFunction' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/AssistantToolsCode' + file_search: '#/components/schemas/AssistantToolsFileSearch' + function: '#/components/schemas/AssistantToolsFunction' + x-oaiExpandable: true + description: 'The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.' + metadata: type: object - properties: - event: - enum: - - thread.run.step.completed - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + usage: + $ref: '#/components/schemas/RunCompletionUsage' + temperature: + type: number + description: 'The sampling temperature used for this run. If not set, defaults to 1.' + nullable: true + top_p: + type: number + description: 'The nucleus sampling value used for this run. If not set, defaults to 1.' + nullable: true + max_prompt_tokens: + minimum: 256 + type: integer + description: "The maximum number of prompt tokens specified to have been used over the course of the run.\n" + nullable: true + max_completion_tokens: + minimum: 256 + type: integer + description: "The maximum number of completion tokens specified to have been used over the course of the run.\n" + nullable: true + truncation_strategy: + $ref: '#/components/schemas/TruncationObject' + tool_choice: + $ref: '#/components/schemas/AssistantsApiToolChoiceOption' + parallel_tool_calls: + $ref: '#/components/schemas/ParallelToolCalls' + response_format: + $ref: '#/components/schemas/AssistantsApiResponseFormatOption' + description: 'Represents an execution run on a [thread](/docs/api-reference/threads).' 
+ x-oaiMeta: + name: The run object + beta: true + example: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1698107661,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699073476,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699073498,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [{\"type\": \"file_search\"}, {\"type\": \"code_interpreter\"}],\n \"metadata\": {},\n \"incomplete_details\": null,\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" + RunStepCompletionUsage: + required: + - prompt_tokens + - completion_tokens + - total_tokens + type: object + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + nullable: true + RunStepDeltaObject: + title: Run step delta object + required: + - id + - object + - delta + type: object + properties: + id: + type: string + description: 'The identifier of the run step, which can be referenced in API endpoints.' + object: + enum: + - thread.run.step.delta + type: string + description: 'The object type, which is always `thread.run.step.delta`.' + delta: type: object properties: - event: - enum: - - thread.run.step.failed - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) fails.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data + step_details: + type: object + oneOf: + - $ref: '#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject' + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsObject' + description: The details of the run step. + discriminator: + propertyName: type + mapping: + message_creation: '#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject' + tool_calls: '#/components/schemas/RunStepDeltaStepDetailsToolCallsObject' + x-oaiExpandable: true + description: The delta containing the fields that have changed on the run step. + description: "Represents a run step delta i.e. 
any changed fields on a run step during streaming.\n" + x-oaiMeta: + name: The run step delta object + beta: true + example: "{\n \"id\": \"step_123\",\n \"object\": \"thread.run.step.delta\",\n \"delta\": {\n \"step_details\": {\n \"type\": \"tool_calls\",\n \"tool_calls\": [\n {\n \"index\": 0,\n \"id\": \"call_123\",\n \"type\": \"code_interpreter\",\n \"code_interpreter\": { \"input\": \"\", \"outputs\": [] }\n }\n ]\n }\n }\n}\n" + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + required: + - type + type: object + properties: + type: + enum: + - message_creation + type: string + description: Always `message_creation`. + message_creation: type: object properties: - event: - enum: - - thread.run.step.cancelled + message_id: type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data + description: The ID of the message that was created by this run step. + description: Details of the message creation by the run step. + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + required: + - index + - type + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: + enum: + - code_interpreter + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + code_interpreter: type: object properties: - event: - enum: - - thread.run.step.expired + input: type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) expires.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data + description: The input to the Code Interpreter tool call. + outputs: + type: array + items: + type: object + oneOf: + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' + discriminator: + propertyName: type + mapping: + logs: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' + image: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' + x-oaiExpandable: true + description: 'The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' + description: The Code Interpreter tool call definition. + description: Details of the Code Interpreter tool call the run step was involved in. + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + required: + - index + - type + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + enum: + - image + type: string + description: Always `image`. + image: type: object properties: - event: - enum: - - thread.message.created + file_id: type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) is created.' 
- x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data + description: 'The [file](/docs/api-reference/files) ID of the image.' + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + required: + - index + - type + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + enum: + - logs + type: string + description: Always `logs`. + logs: + type: string + description: The text output from the Code Interpreter tool call. + description: Text output from the Code Interpreter tool call as part of a run step. + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call + required: + - index + - type + - file_search + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + enum: + - file_search + type: string + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + file_search: type: object - properties: - event: - enum: - - thread.message.in_progress - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data + description: 'For now, this is always going to be an empty object.' + x-oaiTypeLabel: map + RunStepDeltaStepDetailsToolCallsFunctionObject: + title: Function tool call + required: + - index + - type + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + enum: + - function + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + function: type: object properties: - event: - enum: - - thread.message.delta + name: type: string - data: - $ref: '#/components/schemas/MessageDeltaObject' - description: 'Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed.' - x-oaiMeta: - dataDescription: '`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.completed + description: The name of the function. + arguments: type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.incomplete + description: The arguments passed to the function. + output: type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed.' 
- x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - description: "Represents an event emitted when streaming a Run.\n\nEach event in a server-sent events stream has an `event` and `data` property:\n\n```\nevent: thread.created\ndata: {\"id\": \"thread_123\", \"object\": \"thread\", ...}\n```\n\nWe emit events whenever a new object is created, transitions to a new state, or is being\nstreamed in parts (deltas). For example, we emit `thread.run.created` when a new run\nis created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses\nto create a message during a run, we emit a `thread.message.created event`, a\n`thread.message.in_progress` event, many `thread.message.delta` events, and finally a\n`thread.message.completed` event.\n\nWe may add additional events over time, so we recommend handling unknown events gracefully\nin your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to\nintegrate the Assistants API with streaming.\n" - discriminator: - propertyName: event - mapping: - error: '#/components/schemas/ErrorEvent' - done: '#/components/schemas/DoneEvent' - thread.created: '#/components/schemas/AssistantStreamEvent/oneOf/2' - thread.run.created: '#/components/schemas/AssistantStreamEvent/oneOf/3' - thread.run.queued: '#/components/schemas/AssistantStreamEvent/oneOf/4' - thread.run.in_progress: '#/components/schemas/AssistantStreamEvent/oneOf/5' - thread.run.requires_action: '#/components/schemas/AssistantStreamEvent/oneOf/6' - thread.run.completed: '#/components/schemas/AssistantStreamEvent/oneOf/7' - thread.run.incomplete: '#/components/schemas/AssistantStreamEvent/oneOf/8' - thread.run.failed: '#/components/schemas/AssistantStreamEvent/oneOf/9' - thread.run.cancelling: '#/components/schemas/AssistantStreamEvent/oneOf/10' - thread.run.cancelled: '#/components/schemas/AssistantStreamEvent/oneOf/11' - thread.run.expired: '#/components/schemas/AssistantStreamEvent/oneOf/12' - thread.run.step.created: '#/components/schemas/AssistantStreamEvent/oneOf/13' - thread.run.step.in_progress: '#/components/schemas/AssistantStreamEvent/oneOf/14' - thread.run.step.delta: '#/components/schemas/AssistantStreamEvent/oneOf/15' - thread.run.step.completed: '#/components/schemas/AssistantStreamEvent/oneOf/16' - thread.run.step.failed: '#/components/schemas/AssistantStreamEvent/oneOf/17' - thread.run.step.cancelled: '#/components/schemas/AssistantStreamEvent/oneOf/18' - thread.run.step.expired: '#/components/schemas/AssistantStreamEvent/oneOf/19' - thread.message.created: '#/components/schemas/AssistantStreamEvent/oneOf/20' - thread.message.in_progress: '#/components/schemas/AssistantStreamEvent/oneOf/21' - thread.message.delta: '#/components/schemas/AssistantStreamEvent/oneOf/22' - thread.message.completed: '#/components/schemas/AssistantStreamEvent/oneOf/23' - thread.message.incomplete: '#/components/schemas/AssistantStreamEvent/oneOf/24' - x-oaiMeta: - name: Assistant stream events - beta: true - ErrorEvent: + description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' + nullable: true + description: The definition of the function that was called. 
+ RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls required: - - event - - data + - type type: object properties: - event: + type: enum: - - error + - tool_calls type: string - data: - $ref: '#/components/schemas/Error' - description: 'Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout.' - x-oaiMeta: - dataDescription: '`data` is an [error](/docs/guides/error-codes/api-errors)' - DoneEvent: + description: Always `tool_calls`. + tool_calls: + type: array + items: + oneOf: + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject' + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject' + - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject' + file_search: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject' + function: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject' + x-oaiExpandable: true + description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" + description: Details of the tool call. + RunStepDetailsMessageCreationObject: + title: Message creation required: - - event - - data + - type + - message_creation type: object properties: - event: - enum: - - done - type: string - data: + type: enum: - - '[DONE]' + - message_creation type: string - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: '`data` is `[DONE]`' - Batch: + description: Always `message_creation`. + message_creation: + required: + - message_id + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + description: Details of the message creation by the run step. + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call required: - id - - object - - endpoint - - input_file_id - - completion_window - - status - - created_at + - type + - code_interpreter type: object properties: id: type: string - object: + description: The ID of the tool call. + type: enum: - - batch - type: string - description: 'The object type, which is always `batch`.' - endpoint: + - code_interpreter type: string - description: The OpenAI API endpoint used by the batch. - errors: + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + code_interpreter: + required: + - input + - outputs type: object properties: - object: + input: type: string - description: 'The object type, which is always `list`.' - data: + description: The input to the Code Interpreter tool call. + outputs: type: array items: type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: 'The name of the parameter that caused the error, if applicable.' - nullable: true - line: - type: integer - description: 'The line number of the input file where the error occurred, if applicable.' - nullable: true - input_file_id: - type: string - description: The ID of the input file for the batch. - completion_window: - type: string - description: The time frame within which the batch should be processed. 
- status: - enum: - - validating - - failed - - in_progress - - finalizing - - completed - - expired - - cancelling - - cancelled - type: string - description: The current status of the batch. - output_file_id: - type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: - type: string - description: The ID of the file containing the outputs of requests with errors. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. - cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. - request_counts: - required: - - total - - completed - - failed - type: object - properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - description: The request counts for different statuses within the batch. - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - x-oaiMeta: - name: The batch object - example: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - BatchRequestInput: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' + discriminator: + propertyName: type + mapping: + logs: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' + image: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' + x-oaiExpandable: true + description: 'The outputs from the Code Interpreter tool call. 
Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' + description: The Code Interpreter tool call definition. + description: Details of the Code Interpreter tool call the run step was involved in. + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output + required: + - type + - image type: object properties: - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: + type: enum: - - POST - type: string - description: The HTTP method to be used for the request. Currently only `POST` is supported. - url: - type: string - description: 'The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.' - description: The per-line object of the batch input file - x-oaiMeta: - name: The request input object - example: "{\"custom_id\": \"request-1\", \"method\": \"POST\", \"url\": \"/v1/chat/completions\", \"body\": {\"model\": \"gpt-4o-mini\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"What is 2+2?\"}]}}\n" - BatchRequestOutput: - type: object - properties: - id: - type: string - custom_id: + - image type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: - type: object - properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: - type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. - body: - type: object - description: The JSON body of the response - x-oaiTypeLabel: map - nullable: true - error: + description: Always `image`. + image: + required: + - file_id type: object properties: - code: - type: string - description: A machine-readable error code. - message: + file_id: type: string - description: A human-readable error message. - description: 'For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.' - nullable: true - description: The per-line object of the batch output and error files - x-oaiMeta: - name: The request output object - example: "{\"id\": \"batch_req_wnaDys\", \"custom_id\": \"request-2\", \"response\": {\"status_code\": 200, \"request_id\": \"req_c187b3\", \"body\": {\"id\": \"chatcmpl-9758Iw\", \"object\": \"chat.completion\", \"created\": 1711475054, \"model\": \"gpt-4o-mini\", \"choices\": [{\"index\": 0, \"message\": {\"role\": \"assistant\", \"content\": \"2 + 2 equals 4.\"}, \"finish_reason\": \"stop\"}], \"usage\": {\"prompt_tokens\": 24, \"completion_tokens\": 15, \"total_tokens\": 39}, \"system_fingerprint\": null}}, \"error\": null}\n" - ListBatchesResponse: + description: 'The [file](/docs/api-reference/files) ID of the image.' + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output required: - - object - - data - - has_more + - type + - logs type: object properties: - data: - type: array - items: - $ref: '#/components/schemas/Batch' - first_id: + type: + enum: + - logs type: string - example: batch_abc123 - last_id: + description: Always `logs`. 
+ logs: type: string - example: batch_abc456 - has_more: - type: boolean - object: + description: The text output from the Code Interpreter tool call. + description: Text output from the Code Interpreter tool call as part of a run step. + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call + required: + - id + - type + - file_search + type: object + properties: + id: + type: string + description: The ID of the tool call object. + type: enum: - - list + - file_search type: string - AuditLogActorServiceAccount: + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + file_search: + type: object + properties: + ranking_options: + $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObject' + results: + type: array + items: + $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject' + description: The results of the file search. + description: 'For now, this is always going to be an empty object.' + x-oaiTypeLabel: map + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options + required: + - ranker + - score_threshold type: object properties: - id: + ranker: + enum: + - default_2024_08_21 type: string - description: The service account id. - description: The service account that performed the audit logged action. - AuditLogActorUser: + description: The ranker used for the file search. + score_threshold: + maximum: 1 + minimum: 0 + type: number + description: The score threshold for the file search. All values must be a floating point number between 0 and 1. + description: The ranking options for the file search. + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result + required: + - file_id + - file_name + - score type: object properties: - id: + file_id: type: string - description: The user id. - email: + description: The ID of the file that result was found in. + file_name: type: string - description: The user email. - description: The user who performed the audit logged action. - AuditLogActorApiKey: + description: The name of the file that result was found in. + score: + maximum: 1 + minimum: 0 + type: number + description: The score of the result. All values must be a floating point number between 0 and 1. + content: + type: array + items: + type: object + properties: + type: + enum: + - text + type: string + description: The type of the content. + text: + type: string + description: The text content of the file. + description: The content of the result that was found. The content is only included if requested via the include query parameter. + description: A result instance of the file search. + x-oaiTypeLabel: map + RunStepDetailsToolCallsFunctionObject: + title: Function tool call + required: + - id + - type + - function type: object properties: id: type: string - description: The tracking id of the API key. + description: The ID of the tool call object. type: enum: - - user - - service_account + - function type: string - description: The type of API key. Can be either `user` or `service_account`. - user: - $ref: '#/components/schemas/AuditLogActorUser' - service_account: - $ref: '#/components/schemas/AuditLogActorServiceAccount' - description: The API Key used to perform the audit logged action. - AuditLogActorSession: + description: The type of tool call. This is always going to be `function` for this type of tool call. 
+ function: + required: + - name + - arguments + - output + type: object + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' + nullable: true + description: The definition of the function that was called. + RunStepDetailsToolCallsObject: + title: Tool calls + required: + - type + - tool_calls type: object properties: - user: - $ref: '#/components/schemas/AuditLogActorUser' - ip_address: + type: + enum: + - tool_calls type: string - description: The IP address from which the action was performed. - description: The session in which the audit logged action was performed. - AuditLogActor: + description: Always `tool_calls`. + tool_calls: + type: array + items: + oneOf: + - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' + discriminator: + propertyName: type + mapping: + code_interpreter: '#/components/schemas/RunStepDetailsToolCallsCodeObject' + file_search: '#/components/schemas/RunStepDetailsToolCallsFileSearchObject' + function: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' + x-oaiExpandable: true + description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" + description: Details of the tool call. + RunStepObject: + title: Run steps + required: + - id + - object + - created_at + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage type: object properties: + id: + type: string + description: 'The identifier of the run step, which can be referenced in API endpoints.' + object: + enum: + - thread.run.step + type: string + description: 'The object type, which is always `thread.run.step`.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step was created. + assistant_id: + type: string + description: 'The ID of the [assistant](/docs/api-reference/assistants) associated with the run step.' + thread_id: + type: string + description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' + run_id: + type: string + description: 'The ID of the [run](/docs/api-reference/runs) that this run step is a part of.' type: enum: - - session - - api_key + - message_creation + - tool_calls type: string - description: The type of actor. Is either `session` or `api_key`. - session: - $ref: '#/components/schemas/AuditLogActorSession' - api_key: - $ref: '#/components/schemas/AuditLogActorApiKey' - description: The actor who performed the audit logged action. - AuditLogEventType: - enum: - - api_key.created - - api_key.updated - - api_key.deleted - - invite.sent - - invite.accepted - - invite.deleted - - login.succeeded - - login.failed - - logout.succeeded - - logout.failed - - organization.updated - - project.created - - project.updated - - project.archived - - service_account.created - - service_account.updated - - service_account.deleted - - user.added - - user.updated - - user.deleted - type: string - description: The event type. 
- x-oaiExpandable: true - AuditLog: + description: 'The type of run step, which can be either `message_creation` or `tool_calls`.' + status: + enum: + - in_progress + - cancelled + - failed + - completed + - expired + type: string + description: 'The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.' + step_details: + type: object + oneOf: + - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' + - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' + description: The details of the run step. + discriminator: + propertyName: type + mapping: + message_creation: '#/components/schemas/RunStepDetailsMessageCreationObject' + tool_calls: '#/components/schemas/RunStepDetailsToolCallsObject' + x-oaiExpandable: true + last_error: + required: + - code + - message + type: object + properties: + code: + enum: + - server_error + - rate_limit_exceeded + type: string + description: One of `server_error` or `rate_limit_exceeded`. + message: + type: string + description: A human-readable description of the error. + description: The last error associated with this run step. Will be `null` if there are no errors. + nullable: true + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + nullable: true + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step was cancelled. + nullable: true + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step failed. + nullable: true + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the run step completed. + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + usage: + $ref: '#/components/schemas/RunStepCompletionUsage' + description: "Represents a step in execution of a run.\n" + x-oaiMeta: + name: The run step object + beta: true + example: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" + RunToolCallObject: required: - id - type - - effective_at - - actor + - function type: object properties: id: type: string - description: The ID of this log. + description: 'The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.' type: - $ref: '#/components/schemas/AuditLogEventType' - effective_at: - type: integer - description: The Unix timestamp (in seconds) of the event. - project: + enum: + - function + type: string + description: 'The type of tool call the output is required for. 
For now, this is always `function`.' + function: + required: + - name + - arguments type: object properties: - id: - type: string - description: The project ID. name: type: string - description: The project title. - description: The project that the action was scoped to. Absent for actions not scoped to projects. - actor: - $ref: '#/components/schemas/AuditLogActor' - api_key.created: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - data: - type: object - properties: - scopes: - type: array - items: - type: string - description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' - description: The payload used to create the API key. - description: The details for events with this `type`. - api_key.updated: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - changes_requested: - type: object - properties: - scopes: - type: array - items: - type: string - description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' - description: The payload used to update the API key. - description: The details for events with this `type`. - api_key.deleted: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - description: The details for events with this `type`. - invite.sent: - type: object - properties: - id: - type: string - description: The ID of the invite. - data: - type: object - properties: - email: - type: string - description: The email invited to the organization. - role: - type: string - description: The role the email was invited to be. Is either `owner` or `member`. - description: The payload used to create the invite. - description: The details for events with this `type`. - invite.accepted: - type: object - properties: - id: - type: string - description: The ID of the invite. - description: The details for events with this `type`. - invite.deleted: - type: object - properties: - id: - type: string - description: The ID of the invite. - description: The details for events with this `type`. - login.failed: - type: object - properties: - error_code: - type: string - description: The error code of the failure. - error_message: - type: string - description: The error message of the failure. - description: The details for events with this `type`. - logout.failed: - type: object - properties: - error_code: - type: string - description: The error code of the failure. - error_message: - type: string - description: The error message of the failure. - description: The details for events with this `type`. - organization.updated: - type: object - properties: - id: - type: string - description: The organization ID. - changes_requested: - type: object - properties: - title: - type: string - description: The organization title. - description: - type: string - description: The organization description. - name: - type: string - description: The organization name. - settings: - type: object - properties: - threads_ui_visibility: - type: string - description: 'Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.' - usage_dashboard_visibility: - type: string - description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. - description: The payload used to update the organization settings. - description: The details for events with this `type`. 
- project.created: - type: object - properties: - id: - type: string - description: The project ID. - data: - type: object - properties: - name: - type: string - description: The project name. - title: - type: string - description: The title of the project as seen on the dashboard. - description: The payload used to create the project. - description: The details for events with this `type`. - project.updated: - type: object - properties: - id: - type: string - description: The project ID. - changes_requested: - type: object - properties: - title: - type: string - description: The title of the project as seen on the dashboard. - description: The payload used to update the project. - description: The details for events with this `type`. - project.archived: - type: object - properties: - id: - type: string - description: The project ID. - description: The details for events with this `type`. - service_account.created: - type: object - properties: - id: - type: string - description: The service account ID. - data: - type: object - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - description: The payload used to create the service account. - description: The details for events with this `type`. - service_account.updated: - type: object - properties: - id: - type: string - description: The service account ID. - changes_requested: - type: object - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - description: The payload used to updated the service account. - description: The details for events with this `type`. - service_account.deleted: - type: object - properties: - id: + description: The name of the function. + arguments: type: string - description: The service account ID. - description: The details for events with this `type`. - user.added: + description: The arguments that the model expects you to pass to the function. + description: The function definition. + description: Tool call objects + StaticChunkingStrategy: + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + type: object + properties: + max_chunk_size_tokens: + maximum: 4096 + minimum: 100 + type: integer + description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" + additionalProperties: false + StaticChunkingStrategyRequestParam: + title: Static Chunking Strategy + required: + - type + - static + type: object + properties: + type: + enum: + - static + type: string + description: Always `static`. + static: + $ref: '#/components/schemas/StaticChunkingStrategy' + additionalProperties: false + StaticChunkingStrategyResponseParam: + title: Static Chunking Strategy + required: + - type + - static + type: object + properties: + type: + enum: + - static + type: string + description: Always `static`. + static: + $ref: '#/components/schemas/StaticChunkingStrategy' + additionalProperties: false + SubmitToolOutputsRunRequest: + required: + - tool_outputs + type: object + properties: + tool_outputs: + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. 
+ output: + type: string + description: The output of the tool call to be submitted to continue the run. + description: A list of tools for which the outputs are being submitted. + stream: + type: boolean + description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" + nullable: true + additionalProperties: false + ThreadObject: + title: Thread + required: + - id + - object + - created_at + - tool_resources + - metadata + type: object + properties: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' + object: + enum: + - thread + type: string + description: 'The object type, which is always `thread`.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the thread was created. + tool_resources: type: object properties: - id: - type: string - description: The user ID. - data: + code_interpreter: type: object properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - description: The payload used to add the user to the project. - description: The details for events with this `type`. - user.updated: - type: object - properties: - id: - type: string - description: The project ID. - changes_requested: + file_ids: + maxItems: 20 + type: array + items: + type: string + description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" + file_search: type: object properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - description: The payload used to update the user. - description: The details for events with this `type`. - user.deleted: + vector_store_ids: + maxItems: 1 + type: array + items: + type: string + description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" + description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" + nullable: true + metadata: type: object - properties: - id: - type: string - description: The user ID. - description: The details for events with this `type`. - description: A log of a user action or configuration change within this organization. + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + description: 'Represents a thread that contains [messages](/docs/api-reference/messages).' 
x-oaiMeta: - name: The audit log object - example: "{\n \"id\": \"req_xxx_20240101\",\n \"type\": \"api_key.created\",\n \"effective_at\": 1720804090,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.created\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource.operation\"]\n }\n }\n}\n" - ListAuditLogsResponse: + name: The thread object + beta: true + example: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1698107661,\n \"metadata\": {}\n}\n" + TranscriptionSegment: required: - - object - - data - - first_id - - last_id - - has_more + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob type: object properties: - object: - enum: - - list + id: + type: integer + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + description: Start time of the segment in seconds. + format: float + end: + type: number + description: End time of the segment in seconds. + format: float + text: type: string - data: + description: Text content of the segment. + tokens: type: array items: - $ref: '#/components/schemas/AuditLog' - first_id: + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + description: Temperature parameter used for generating the segment. + format: float + avg_logprob: + type: number + description: 'Average logprob of the segment. If the value is lower than -1, consider the logprobs failed.' + format: float + compression_ratio: + type: number + description: 'Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed.' + format: float + no_speech_prob: + type: number + description: 'Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent.' + format: float + TranscriptionWord: + required: + - word + - start + - end + type: object + properties: + word: type: string - example: audit_log-defb456h8dks - last_id: + description: The text content of the word. + start: + type: number + description: Start time of the word in seconds. + format: float + end: + type: number + description: End time of the word in seconds. + format: float + TruncationObject: + title: Thread Truncation Controls + required: + - type + type: object + properties: + type: + enum: + - auto + - last_messages type: string - example: audit_log-hnbkd8s93s - has_more: - type: boolean - Invite: + description: 'The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.' + last_messages: + minimum: 1 + type: integer + description: The number of most recent messages from the thread when constructing the context for the run. + nullable: true + description: Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. 
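+    # Illustrative sketch (not part of the generated schema): how the truncation controls
+    # described by TruncationObject above might be passed when creating a run, assuming the
+    # official `openai` Python SDK; the thread and assistant IDs below are placeholders.
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   run = client.beta.threads.runs.create(
+    #       thread_id="thread_abc123",      # hypothetical thread ID
+    #       assistant_id="asst_abc123",     # hypothetical assistant ID
+    #       # keep only the 5 most recent messages in the run's context window
+    #       truncation_strategy={"type": "last_messages", "last_messages": 5},
+    #   )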
+ UpdateVectorStoreRequest: + type: object + properties: + name: + type: string + description: The name of the vector store. + nullable: true + expires_after: + $ref: '#/components/schemas/VectorStoreExpirationAfter' + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + additionalProperties: false + Upload: + title: Upload required: - - object - - id - - email - - role - - status - - invited_at + - bytes + - created_at - expires_at + - filename + - id + - purpose + - status type: object properties: - object: - enum: - - organization.invite - type: string - description: 'The object type, which is always `organization.invite`' id: type: string - description: 'The identifier, which can be referenced in API endpoints' - email: + description: 'The Upload unique identifier, which can be referenced in API endpoints.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: type: string - description: The email address of the individual to whom the invite was sent - role: - enum: - - owner - - reader + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. + purpose: type: string - description: '`owner` or `reader`' + description: 'The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values.' status: enum: - - accepted - - expired - pending + - completed + - cancelled + - expired type: string - description: '`accepted`,`expired`, or `pending`' - invited_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was sent. + description: The status of the Upload. expires_at: type: integer - description: The Unix timestamp (in seconds) of when the invite expires. - accepted_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was accepted. - description: Represents an individual `invite` to the organization. - x-oaiMeta: - name: The invite object - example: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" - InviteListResponse: - required: - - object - - data - type: object - properties: + description: The Unix timestamp (in seconds) for when the Upload will expire. object: enum: - - list - type: string - description: 'The object type, which is always `list`' - data: - type: array - items: - $ref: '#/components/schemas/Invite' - first_id: - type: string - description: The first `invite_id` in the retrieved `list` - last_id: + - upload type: string - description: The last `invite_id` in the retrieved `list` - has_more: - type: boolean - description: The `has_more` property is used for pagination to indicate there are additional results. - InviteRequest: + description: 'The object type, which is always "upload".'
+ file: + $ref: '#/components/schemas/OpenAIFile' + description: "The Upload object can accept byte chunks in the form of Parts.\n" + x-oaiMeta: + name: The upload object + example: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" + UploadPart: + title: UploadPart required: - - email - - role + - created_at + - id + - object + - upload_id type: object properties: - email: + id: type: string - description: Send an email to this address - role: - enum: - - reader - - owner + description: 'The upload Part unique identifier, which can be referenced in API endpoints.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: type: string - description: '`owner` or `reader`' - InviteDeleteResponse: - required: - - object - - id - - deleted - type: object - properties: + description: The ID of the Upload object that this Part was added to. object: enum: - - organization.invite.deleted - type: string - description: 'The object type, which is always `organization.invite.deleted`' - id: + - upload.part type: string - deleted: - type: boolean - User: + description: 'The object type, which is always `upload.part`.' + description: "The upload Part represents a chunk of bytes we can add to an Upload object.\n" + x-oaiMeta: + name: The upload part object + example: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719186911,\n \"upload_id\": \"upload_abc123\"\n}\n" + UsageAudioSpeechesResult: required: - object - - id - - name - - email - - role - - added_at + - characters + - num_model_requests type: object properties: object: enum: - - organization.user + - organization.usage.audio_speeches.result type: string - description: 'The object type, which is always `organization.user`' - id: + characters: + type: integer + description: The number of characters processed. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: type: string - description: 'The identifier, which can be referenced in API endpoints' - name: + description: 'When `group_by=project_id`, this field provides the project ID of the grouped usage result.' + user_id: type: string - description: The name of the user - email: + description: 'When `group_by=user_id`, this field provides the user ID of the grouped usage result.' + api_key_id: type: string - description: The email address of the user - role: - enum: - - owner - - reader + description: 'When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result.' + model: type: string - description: '`owner` or `reader`' - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the user was added. - description: Represents an individual `user` within an organization. + description: 'When `group_by=model`, this field provides the model name of the grouped usage result.' + description: The aggregated audio speeches usage details of the specific time bucket. 
x-oaiMeta: - name: The user object - example: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - UserListResponse: + name: Audio speeches usage object + example: "{\n \"object\": \"orgainzation.usage.audio_speeches.result\",\n \"characters\": 45,\n \"num_model_requests\": 1,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"tts-1\"\n}\n" + UsageAudioTranscriptionsResult: required: - object - - data - - first_id - - last_id - - has_more + - seconds + - num_model_requests type: object properties: object: enum: - - list + - organization.usage.audio_transcriptions.result type: string - data: - type: array - items: - $ref: '#/components/schemas/User' - first_id: + seconds: + type: integer + description: The number of seconds processed. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: type: string - last_id: + description: 'When `group_by=project_id`, this field provides the project ID of the grouped usage result.' + user_id: type: string - has_more: - type: boolean - UserRoleUpdateRequest: - required: - - role - type: object - properties: - role: - enum: - - owner - - reader + description: 'When `group_by=user_id`, this field provides the user ID of the grouped usage result.' + api_key_id: type: string - description: '`owner` or `reader`' - UserDeleteResponse: + description: 'When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result.' + model: + type: string + description: 'When `group_by=model`, this field provides the model name of the grouped usage result.' + description: The aggregated audio transcriptions usage details of the specific time bucket. + x-oaiMeta: + name: Audio transcriptions usage object + example: "{\n \"object\": \"orgainzation.usage.audio_transcriptions.result\",\n \"seconds\": 10,\n \"num_model_requests\": 1,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"tts-1\"\n}\n" + UsageCodeInterpreterSessionsResult: required: - object - - id - - deleted + - sessions type: object properties: object: enum: - - organization.user.deleted + - organization.usage.code_interpreter_sessions.result type: string - id: + sessions: + type: integer + description: The number of code interpreter sessions. + project_id: type: string - deleted: - type: boolean - Project: + description: 'When `group_by=project_id`, this field provides the project ID of the grouped usage result.' + description: The aggregated code interpreter sessions usage details of the specific time bucket. + x-oaiMeta: + name: Code interpreter sessions usage object + example: "{\n \"object\": \"orgainzation.usage.code_interpreter_sessions.result\",\n \"sessions\": 1,\n \"project_id\": \"proj_abc\"\n}\n" + UsageCompletionsResult: required: - - id - object - - name - - created_at - - status + - input_tokens + - output_tokens + - num_model_requests type: object properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' object: enum: - - organization.project - type: string - description: 'The object type, which is always `organization.project`' - name: + - organization.usage.completions.result type: string - description: The name of the project. This appears in reporting. 
- created_at: + input_tokens: type: integer - description: The Unix timestamp (in seconds) of when the project was created. - archived_at: + description: The number of input tokens used. + input_cached_tokens: type: integer - description: The Unix timestamp (in seconds) of when the project was archived or `null`. - nullable: true - status: - enum: - - active - - archived + description: The number of input tokens that has been cached from previous requests. + output_tokens: + type: integer + description: The number of output tokens used. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: type: string - description: '`active` or `archived`' - description: Represents an individual project. + description: 'When `group_by=project_id`, this field provides the project ID of the grouped usage result.' + user_id: + type: string + description: 'When `group_by=user_id`, this field provides the user ID of the grouped usage result.' + api_key_id: + type: string + description: 'When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result.' + model: + type: string + description: 'When `group_by=model`, this field provides the model name of the grouped usage result.' + batch: + type: boolean + description: 'When `group_by=batch`, this field tells whether the grouped usage result is batch or not.' + description: The aggregated completions usage details of the specific time bucket. x-oaiMeta: - name: The project object - example: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - ProjectListResponse: + name: Completions usage object + example: "{\n \"object\": \"orgainzation.usage.completions.result\",\n \"input_tokens\": 5000,\n \"output_tokens\": 1000,\n \"input_cached_tokens\": 4000,\n \"num_model_requests\": 5,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"batch\": false\n}\n" + UsageEmbeddingsResult: required: - object - - data - - first_id - - last_id - - has_more + - input_tokens + - num_model_requests type: object properties: object: enum: - - list + - organization.usage.embeddings.result type: string - data: - type: array - items: - $ref: '#/components/schemas/Project' - first_id: + input_tokens: + type: integer + description: The number of input tokens used. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: type: string - last_id: + description: 'When `group_by=project_id`, this field provides the project ID of the grouped usage result.' + user_id: type: string - has_more: - type: boolean - ProjectCreateRequest: - required: - - name - type: object - properties: - name: + description: 'When `group_by=user_id`, this field provides the user ID of the grouped usage result.' + api_key_id: type: string - description: 'The friendly name of the project, this name appears in reports.' - ProjectUpdateRequest: - required: - - name - type: object - properties: - name: + description: 'When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result.' + model: type: string - description: 'The updated name of the project, this name appears in reports.' - DefaultProjectErrorResponse: + description: 'When `group_by=model`, this field provides the model name of the grouped usage result.' 
+ description: The aggregated embeddings usage details of the specific time bucket. + x-oaiMeta: + name: Embeddings usage object + example: "{\n \"object\": \"orgainzation.usage.embeddings.result\",\n \"input_tokens\": 20,\n \"num_model_requests\": 2,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"text-embedding-ada-002-v2\"\n}\n" + UsageImagesResult: required: - - code - - message + - object + - images + - num_model_requests type: object properties: - code: + object: + enum: + - organization.usage.images.result + type: string + images: type: integer - message: + description: The number of images processed. + num_model_requests: + type: integer + description: The count of requests made to the model. + source: type: string - ProjectUser: + description: 'When `group_by=source`, this field provides the source of the grouped usage result, possible values are `image.generation`, `image.edit`, `image.variation`.' + size: + type: string + description: 'When `group_by=size`, this field provides the image size of the grouped usage result.' + project_id: + type: string + description: 'When `group_by=project_id`, this field provides the project ID of the grouped usage result.' + user_id: + type: string + description: 'When `group_by=user_id`, this field provides the user ID of the grouped usage result.' + api_key_id: + type: string + description: 'When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result.' + model: + type: string + description: 'When `group_by=model`, this field provides the model name of the grouped usage result.' + description: The aggregated images usage details of the specific time bucket. + x-oaiMeta: + name: Images usage object + example: "{\n \"object\": \"orgainzation.usage.images.result\",\n \"images\": 2,\n \"num_model_requests\": 2,\n \"size\": \"1024x1024\",\n \"source\": \"image.generation\",\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"dall-e-3\"\n}\n" + UsageModerationsResult: required: - object - - id - - name - - email - - role - - added_at + - input_tokens + - num_model_requests type: object properties: object: enum: - - organization.project.user + - organization.usage.moderations.result type: string - description: 'The object type, which is always `organization.project.user`' - id: + input_tokens: + type: integer + description: The number of input tokens used. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: type: string - description: 'The identifier, which can be referenced in API endpoints' - name: + description: 'When `group_by=project_id`, this field provides the project ID of the grouped usage result.' + user_id: type: string - description: The name of the user - email: + description: 'When `group_by=user_id`, this field provides the user ID of the grouped usage result.' + api_key_id: type: string - description: The email address of the user - role: - enum: - - owner - - member + description: 'When `group_by=api_key_id`, this field provides the API key ID of the grouped usage result.' + model: type: string - description: '`owner` or `member`' - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the project was added. - description: Represents an individual user in a project. + description: 'When `group_by=model`, this field provides the model name of the grouped usage result.' 
+ description: The aggregated moderations usage details of the specific time bucket. x-oaiMeta: - name: The project user object - example: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - ProjectUserListResponse: + name: Moderations usage object + example: "{\n \"object\": \"orgainzation.usage.moderations.result\",\n \"input_tokens\": 20,\n \"num_model_requests\": 2,\n \"project_id\": \"proj_abc\",\n \"user_id\": \"user-abc\",\n \"api_key_id\": \"key_abc\",\n \"model\": \"text-moderation\"\n}\n" + UsageResponse: required: - object - data - - first_id - - last_id - has_more + - next_page type: object properties: object: + enum: + - page type: string data: type: array items: - $ref: '#/components/schemas/ProjectUser' - first_id: - type: string - last_id: - type: string + $ref: '#/components/schemas/UsageTimeBucket' has_more: type: boolean - ProjectUserCreateRequest: - required: - - user_id - - role - type: object - properties: - user_id: - type: string - description: The ID of the user. - role: - enum: - - owner - - member + next_page: type: string - description: '`owner` or `member`' - ProjectUserUpdateRequest: + UsageTimeBucket: required: - - role + - object + - start_time + - end_time + - result type: object properties: - role: + object: enum: - - owner - - member + - bucket type: string - description: '`owner` or `member`' - ProjectUserDeleteResponse: + start_time: + type: integer + end_time: + type: integer + result: + type: array + items: + oneOf: + - $ref: '#/components/schemas/UsageCompletionsResult' + - $ref: '#/components/schemas/UsageEmbeddingsResult' + - $ref: '#/components/schemas/UsageModerationsResult' + - $ref: '#/components/schemas/UsageImagesResult' + - $ref: '#/components/schemas/UsageAudioSpeechesResult' + - $ref: '#/components/schemas/UsageAudioTranscriptionsResult' + - $ref: '#/components/schemas/UsageVectorStoresResult' + - $ref: '#/components/schemas/UsageCodeInterpreterSessionsResult' + - $ref: '#/components/schemas/CostsResult' + discriminator: + propertyName: object + mapping: + organization.usage.completions.result: '#/components/schemas/UsageCompletionsResult' + organization.usage.embeddings.result: '#/components/schemas/UsageEmbeddingsResult' + organization.usage.moderations.result: '#/components/schemas/UsageModerationsResult' + organization.usage.images.result: '#/components/schemas/UsageImagesResult' + organization.usage.audio_speeches.result: '#/components/schemas/UsageAudioSpeechesResult' + organization.usage.audio_transcriptions.result: '#/components/schemas/UsageAudioTranscriptionsResult' + organization.usage.vector_stores.result: '#/components/schemas/UsageVectorStoresResult' + organization.usage.code_interpreter_sessions.result: '#/components/schemas/UsageCodeInterpreterSessionsResult' + organization.costs.result: '#/components/schemas/CostsResult' + UsageVectorStoresResult: required: - object - - id - - deleted + - usage_bytes type: object properties: object: enum: - - organization.project.user.deleted + - organization.usage.vector_stores.result type: string - id: + usage_bytes: + type: integer + description: The vector stores usage in bytes. + project_id: type: string - deleted: - type: boolean - ProjectServiceAccount: + description: 'When `group_by=project_id`, this field provides the project ID of the grouped usage result.' 
+ description: The aggregated vector stores usage details of the specific time bucket. + x-oaiMeta: + name: Vector stores usage object + example: "{\n \"object\": \"orgainzation.usage.vector_stores.result\",\n \"usage_bytes\": 1024,\n \"project_id\": \"proj_abc\"\n}\n" + User: required: - object - id - name + - email - role - - created_at + - added_at type: object properties: object: enum: - - organization.project.service_account + - organization.user type: string - description: 'The object type, which is always `organization.project.service_account`' + description: 'The object type, which is always `organization.user`' id: type: string description: 'The identifier, which can be referenced in API endpoints' name: type: string - description: The name of the service account + description: The name of the user + email: + type: string + description: The email address of the user role: enum: - owner - - member + - reader type: string - description: '`owner` or `member`' - created_at: + description: '`owner` or `reader`' + added_at: type: integer - description: The Unix timestamp (in seconds) of when the service account was created - description: Represents an individual service account in a project. + description: The Unix timestamp (in seconds) of when the user was added. + description: Represents an individual `user` within an organization. x-oaiMeta: - name: The project service account object - example: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" - ProjectServiceAccountListResponse: + name: The user object + example: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" + UserDeleteResponse: + required: + - object + - id + - deleted + type: object + properties: + object: + enum: + - organization.user.deleted + type: string + id: + type: string + deleted: + type: boolean + UserListResponse: required: - object - data @@ -10735,167 +13923,258 @@ components: data: type: array items: - $ref: '#/components/schemas/ProjectServiceAccount' + $ref: '#/components/schemas/User' first_id: type: string last_id: type: string has_more: type: boolean - ProjectServiceAccountCreateRequest: + UserRoleUpdateRequest: required: - - name + - role type: object properties: - name: + role: + enum: + - owner + - reader type: string - description: The name of the service account being created. - ProjectServiceAccountCreateResponse: + description: '`owner` or `reader`' + VectorStoreExpirationAfter: + title: Vector store expiration policy required: - - object - - id - - name - - role - - created_at - - api_key + - anchor + - days type: object properties: - object: - enum: - - organization.project.service_account - type: string - id: - type: string - name: - type: string - role: + anchor: enum: - - member + - last_active_at type: string - description: Service accounts can only have one role of type `member` - created_at: + description: 'Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.' + days: + maximum: 365 + minimum: 1 type: integer - api_key: - $ref: '#/components/schemas/ProjectServiceAccountApiKey' - ProjectServiceAccountApiKey: + description: The number of days after the anchor time that the vector store will expire. + description: The expiration policy for a vector store. 
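+    # Illustrative sketch (not part of the generated schema): how the expiration policy
+    # described by VectorStoreExpirationAfter might be supplied when creating a vector store,
+    # assuming the official `openai` Python SDK (vector stores were exposed under
+    # `client.beta.vector_stores` at the time of this spec; newer SDK versions drop `beta`).
+    #
+    #   from openai import OpenAI
+    #   client = OpenAI()
+    #   vector_store = client.beta.vector_stores.create(
+    #       name="support_docs",                                    # hypothetical store name
+    #       # expire the store 7 days after it was last active
+    #       expires_after={"anchor": "last_active_at", "days": 7},
+    #   )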
+ VectorStoreFileBatchObject: + title: Vector store file batch required: + - id - object - - value - - name - created_at - - id + - vector_store_id + - status + - file_counts type: object properties: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' object: enum: - - organization.project.service_account.api_key - type: string - description: 'The object type, which is always `organization.project.service_account.api_key`' - value: - type: string - name: + - vector_store.files_batch type: string + description: 'The object type, which is always `vector_store.files_batch`.' created_at: type: integer - id: + description: The Unix timestamp (in seconds) for when the vector store files batch was created. + vector_store_id: type: string - ProjectServiceAccountDeleteResponse: - required: - - object - - id - - deleted - type: object - properties: - object: + description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' + status: enum: - - organization.project.service_account.deleted - type: string - id: + - in_progress + - completed + - cancelled + - failed type: string - deleted: - type: boolean - ProjectApiKey: + description: 'The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`.' + file_counts: + required: + - in_progress + - completed + - cancelled + - failed + - total + type: object + properties: + in_progress: + type: integer + description: The number of files that are currently being processed. + completed: + type: integer + description: The number of files that have been processed. + failed: + type: integer + description: The number of files that have failed to process. + cancelled: + type: integer + description: The number of files that were cancelled. + total: + type: integer + description: The total number of files. + description: A batch of files attached to a vector store. + x-oaiMeta: + name: The vector store files batch object + beta: true + example: "{\n \"id\": \"vsfb_123\",\n \"object\": \"vector_store.files_batch\",\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 100\n }\n}\n" + VectorStoreFileObject: + title: Vector store files required: + - id - object - - redacted_value - - name + - usage_bytes - created_at - - id - - owner + - vector_store_id + - status + - last_error type: object properties: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' object: enum: - - organization.project.api_key - type: string - description: 'The object type, which is always `organization.project.api_key`' - redacted_value: - type: string - description: The redacted value of the API key - name: + - vector_store.file type: string - description: The name of the API key + description: 'The object type, which is always `vector_store.file`.' + usage_bytes: + type: integer + description: The total vector store usage in bytes. Note that this may be different from the original file size. created_at: type: integer - description: The Unix timestamp (in seconds) of when the API key was created - id: + description: The Unix timestamp (in seconds) for when the vector store file was created.
+ vector_store_id: type: string - description: 'The identifier, which can be referenced in API endpoints' - owner: + description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' + status: + enum: + - in_progress + - completed + - cancelled + - failed + type: string + description: 'The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use.' + last_error: + required: + - code + - message type: object properties: - type: + code: enum: - - user - - service_account + - server_error + - unsupported_file + - invalid_file type: string - description: '`user` or `service_account`' - user: - $ref: '#/components/schemas/ProjectUser' - service_account: - $ref: '#/components/schemas/ProjectServiceAccount' - description: Represents an individual API key in a project. + description: One of `server_error`, `unsupported_file`, or `invalid_file`. + message: + type: string + description: A human-readable description of the error. + description: The last error associated with this vector store file. Will be `null` if there are no errors. + nullable: true + chunking_strategy: + type: object + oneOf: + - $ref: '#/components/schemas/StaticChunkingStrategyResponseParam' + - $ref: '#/components/schemas/OtherChunkingStrategyResponseParam' + description: The strategy used to chunk the file. + discriminator: + propertyName: type + mapping: + static: '#/components/schemas/StaticChunkingStrategyResponseParam' + other: '#/components/schemas/OtherChunkingStrategyResponseParam' + x-oaiExpandable: true + description: A list of files attached to a vector store. x-oaiMeta: - name: The project API key object - example: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" - ProjectApiKeyListResponse: + name: The vector store file object + beta: true + example: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"usage_bytes\": 1234,\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"last_error\": null,\n \"chunking_strategy\": {\n \"type\": \"static\",\n \"static\": {\n \"max_chunk_size_tokens\": 800,\n \"chunk_overlap_tokens\": 400\n }\n }\n}\n" + VectorStoreObject: + title: Vector store required: + - id - object - - data - - first_id - - last_id - - has_more + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata type: object properties: + id: + type: string + description: 'The identifier, which can be referenced in API endpoints.' object: enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/ProjectApiKey' - first_id: + - vector_store type: string - last_id: + description: 'The object type, which is always `vector_store`.' + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the vector store was created.
+ name: type: string - has_more: - type: boolean - ProjectApiKeyDeleteResponse: - required: - - object - - id - - deleted - type: object - properties: - object: + description: The name of the vector store. + usage_bytes: + type: integer + description: The total number of bytes used by the files in the vector store. + file_counts: + required: + - in_progress + - completed + - failed + - cancelled + - total + type: object + properties: + in_progress: + type: integer + description: The number of files that are currently being processed. + completed: + type: integer + description: The number of files that have been successfully processed. + failed: + type: integer + description: The number of files that have failed to process. + cancelled: + type: integer + description: The number of files that were cancelled. + total: + type: integer + description: The total number of files. + status: enum: - - organization.project.api_key.deleted - type: string - id: + - expired + - in_progress + - completed type: string - deleted: - type: boolean + description: 'The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.' + expires_after: + $ref: '#/components/schemas/VectorStoreExpirationAfter' + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the vector store will expire. + nullable: true + last_active_at: + type: integer + description: The Unix timestamp (in seconds) for when the vector store was last active. + nullable: true + metadata: + type: object + description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" + nullable: true + x-oaiTypeLabel: map + description: A vector store is a collection of processed files can be used by the `file_search` tool. + x-oaiMeta: + name: The vector store object + beta: true + example: "{\n \"id\": \"vs_123\",\n \"object\": \"vector_store\",\n \"created_at\": 1698107661,\n \"usage_bytes\": 123456,\n \"last_active_at\": 1698107661,\n \"name\": \"my_vector_store\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"cancelled\": 0,\n \"failed\": 0,\n \"total\": 100\n },\n \"metadata\": {},\n \"last_used_at\": 1698107661\n}\n" securitySchemes: ApiKeyAuth: type: http @@ -10926,7 +14205,7 @@ tags: - name: Models description: List and describe the various models available in the API. - name: Moderations - description: 'Given a input text, outputs if the model classifies it as potentially harmful.' + description: 'Given text and/or image inputs, classifies if those inputs are potentially harmful.' - name: Audit Logs description: List user actions and configuration changes within this organization. 
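+# Illustrative sketch (not part of the spec itself): the updated Moderations tag description
+# above covers text and/or image inputs; a request combining both might look like the following,
+# assuming the official `openai` Python SDK and the `omni-moderation-latest` model. The text and
+# image URL below are placeholders.
+#
+#   from openai import OpenAI
+#   client = OpenAI()
+#   result = client.moderations.create(
+#       model="omni-moderation-latest",
+#       input=[
+#           {"type": "text", "text": "...text to classify..."},
+#           {"type": "image_url", "image_url": {"url": "https://example.com/image.png"}},
+#       ],
+#   )
+#   print(result.results[0].flagged)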
x-oaiMeta: @@ -10935,8 +14214,12 @@ x-oaiMeta: title: Endpoints - id: assistants title: Assistants + beta: true - id: administration title: Administration + - id: realtime + title: Realtime + beta: true - id: legacy title: Legacy groups: @@ -10962,7 +14245,7 @@ x-oaiMeta: path: verbose-json-object - id: chat title: Chat - description: "Given a list of messages comprising a conversation, the model will return a response.\n\nRelated guide: [Chat Completions](/docs/guides/text-generation)\n" + description: "Given a list of messages comprising a conversation, the model will return a response.\nRelated guide: [Chat Completions](/docs/guides/text-generation)\n" navigationGroup: endpoints sections: - type: endpoint @@ -10976,7 +14259,7 @@ x-oaiMeta: path: streaming - id: embeddings title: Embeddings - description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.\n\nRelated guide: [Embeddings](/docs/guides/embeddings)\n" + description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.\nRelated guide: [Embeddings](/docs/guides/embeddings)\n" navigationGroup: endpoints sections: - type: endpoint @@ -10987,7 +14270,7 @@ x-oaiMeta: path: object - id: fine-tuning title: Fine-tuning - description: "Manage fine-tuning jobs to tailor a model to your specific training data.\n\nRelated guide: [Fine-tune models](/docs/guides/fine-tuning)\n" + description: "Manage fine-tuning jobs to tailor a model to your specific training data.\nRelated guide: [Fine-tune models](/docs/guides/fine-tuning)\n" navigationGroup: endpoints sections: - type: endpoint @@ -11025,7 +14308,7 @@ x-oaiMeta: path: checkpoint-object - id: batch title: Batch - description: "Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount.\n\nRelated guide: [Batch](/docs/guides/batch)\n" + description: "Create large batches of API requests for asynchronous processing. 
The Batch API returns completions within 24 hours for a 50% discount.\nRelated guide: [Batch](/docs/guides/batch)\n" navigationGroup: endpoints sections: - type: endpoint @@ -11097,7 +14380,7 @@ x-oaiMeta: path: part-object - id: images title: Images - description: "Given a prompt and/or an input image, the model will generate a new image.\n\nRelated guide: [Image generation](/docs/guides/images)\n" + description: "Given a prompt and/or an input image, the model will generate a new image.\nRelated guide: [Image generation](/docs/guides/images)\n" navigationGroup: endpoints sections: - type: endpoint @@ -11131,7 +14414,7 @@ x-oaiMeta: path: object - id: moderations title: Moderations - description: "Given some input text, outputs if the model classifies it as potentially harmful across several categories.\n\nRelated guide: [Moderations](/docs/guides/moderation)\n" + description: "Given text and/or image inputs, classifies if those inputs are potentially harmful across several categories.\nRelated guide: [Moderations](/docs/guides/moderation)\n" navigationGroup: endpoints sections: - type: endpoint @@ -11240,7 +14523,7 @@ x-oaiMeta: key: RunObject path: object - id: run-steps - title: Run Steps + title: Run steps beta: true description: "Represents the steps (model and tool calls) taken during the run.\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" navigationGroup: assistants @@ -11255,7 +14538,7 @@ x-oaiMeta: key: RunStepObject path: step-object - id: vector-stores - title: Vector Stores + title: Vector stores beta: true description: "Vector stores are used to store files for use by the `file_search` tool.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" navigationGroup: assistants @@ -11279,7 +14562,7 @@ x-oaiMeta: key: VectorStoreObject path: object - id: vector-stores-files - title: Vector Store Files + title: Vector store files beta: true description: "Vector store files represent files inside a vector store.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" navigationGroup: assistants @@ -11300,9 +14583,9 @@ x-oaiMeta: key: VectorStoreFileObject path: file-object - id: vector-stores-file-batches - title: Vector Store File Batches + title: Vector store file batches beta: true - description: "Vector store file batches represent operations to add multiple files to a vector store.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" + description: "Vector store file batches represent operations to add multiple files to a vector store.\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" navigationGroup: assistants sections: - type: endpoint @@ -11323,7 +14606,7 @@ x-oaiMeta: - id: assistants-streaming title: Streaming beta: true - description: "Stream the result of executing a Run or resuming a Run after submitting tool outputs.\n\nYou can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun),\n[Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs)\nendpoints by passing `\"stream\": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream.\n\nOur Node and Python SDKs provide helpful utilities to make streaming easy. 
Reference the\n[Assistants API quickstart](/docs/assistants/overview) to learn more.\n" + description: "Stream the result of executing a Run or resuming a Run after submitting tool outputs.\nYou can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun),\n[Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs)\nendpoints by passing `\"stream\": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream.\nOur Node and Python SDKs provide helpful utilities to make streaming easy. Reference the\n[Assistants API quickstart](/docs/assistants/overview) to learn more.\n" navigationGroup: assistants sections: - type: object @@ -11336,8 +14619,8 @@ x-oaiMeta: key: AssistantStreamEvent path: events - id: administration - title: Overview - description: "Programmatically manage your organization. \n\nThe Audit Logs endpoint provides a log of all actions taken in the \norganization for security and monitoring purposes.\n\nTo access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints.\n\nFor best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization)\n" + title: Administration + description: "Programmatically manage your organization. \nThe Audit Logs endpoint provides a log of all actions taken in the organization for security and monitoring purposes.\nTo access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints.\nFor best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices#setting-up-your-organization)\n" navigationGroup: administration - id: invite title: Invites @@ -11381,7 +14664,7 @@ x-oaiMeta: path: object - id: projects title: Projects - description: "Manage the projects within an orgnanization includes creation, updating, and archiving or projects. \nThe Default project cannot be modified or archived. \n" + description: "Manage the projects within an orgnanization includes creation, updating, and archiving or projects. \nThe Default project cannot be modified or archived.\n" navigationGroup: administration sections: - type: endpoint @@ -11403,8 +14686,8 @@ x-oaiMeta: key: Project path: object - id: project-users - title: Project Users - description: "Manage users within a project, including adding, updating roles, and removing users. \nUsers cannot be removed from the Default project, unless they are being removed from the organization. \n" + title: Project users + description: "Manage users within a project, including adding, updating roles, and removing users. \nUsers cannot be removed from the Default project, unless they are being removed from the organization.\n" navigationGroup: administration sections: - type: endpoint @@ -11426,7 +14709,7 @@ x-oaiMeta: key: ProjectUser path: object - id: project-service-accounts - title: Project Service Accounts + title: Project service accounts description: "Manage service accounts within a project. A service account is a bot user that is not associated with a user. \nIf a user leaves an organization, their keys and membership in projects will no longer work. 
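Since the Administration description above stresses that these endpoints require a separate Admin API Key, a short hedged sketch of what a call against the Projects group might look like. The path `/v1/organization/projects` and the `limit` query parameter are assumptions taken from the usual Admin API layout rather than quoted from this hunk; the printed fields follow the Project object referenced above.

    import os
    import requests

    # Admin API keys (generated from the organization settings page) are distinct
    # from project API keys and only work on the administration endpoints.
    ADMIN_HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}

    # Assumed path for the "projects" group shown above: list the organization's
    # projects (archived projects are excluded by default).
    resp = requests.get(
        "https://api.openai.com/v1/organization/projects",
        headers=ADMIN_HEADERS,
        params={"limit": 20},
    )
    for project in resp.json().get("data", []):
        print(project["id"], project["name"], project["status"])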
Service accounts \ndo not have this limitation. However, service accounts can also be deleted from a project.\n" navigationGroup: administration sections: @@ -11446,8 +14729,8 @@ x-oaiMeta: key: ProjectServiceAccount path: object - id: project-api-keys - title: Project API Keys - description: "Manage API keys for a given project. Supports listing and deleting keys for users. \nThis API does not allow issuing keys for users, as users need to authorize themselves to generate keys. \n" + title: Project API keys + description: "Manage API keys for a given project. Supports listing and deleting keys for users. \nThis API does not allow issuing keys for users, as users need to authorize themselves to generate keys.\n" navigationGroup: administration sections: - type: endpoint @@ -11462,9 +14745,23 @@ x-oaiMeta: - type: object key: ProjectApiKey path: object + - id: project-rate-limits + title: Project rate limits + description: "Manage rate limits per model for projects. Rate limits may be configured to be equal to or lower than the organization's rate limits.\n" + navigationGroup: administration + sections: + - type: endpoint + key: list-project-rate-limits + path: list + - type: endpoint + key: update-project-rate-limits + path: update + - type: object + key: ProjectRateLimit + path: object - id: audit-logs - title: Audit Logs - description: "Logs of user actions and configuration changes within this organization. \n\nTo log events, you must activate logging in the [Organization Settings](/settings/organization/general). \nOnce activated, for security reasons, logging cannot be deactivated.\n" + title: Audit logs + description: "Logs of user actions and configuration changes within this organization. \nTo log events, you must activate logging in the [Organization Settings](/settings/organization/general). \nOnce activated, for security reasons, logging cannot be deactivated.\n" navigationGroup: administration sections: - type: endpoint @@ -11473,11 +14770,196 @@ x-oaiMeta: - type: object key: AuditLog path: object + - id: usage + title: Usage + description: "The **Usage API** provides detailed insights into your activity across the OpenAI API. It also includes a separate [Costs endpoint](/docs/api-reference/usage/costs), which offers visibility into your spend, breaking down consumption by invoice line items and project IDs.\n\nWhile the Usage API delivers granular usage data, it may not always reconcile perfectly with the Costs due to minor differences in how usage and spend are recorded. 
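To make the new project rate limits group above concrete: a hedged sketch that lists a project's per-model rate limits and then lowers one of them, which per the description may only be set equal to or below the organization's own limit. The path `/v1/organization/projects/{project_id}/rate_limits`, the project ID, and the `max_requests_per_1_minute` field are assumptions inferred from the `list-project-rate-limits` / `update-project-rate-limits` keys, not quoted from this hunk.

    import os
    import requests

    ADMIN_HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
    PROJECT_ID = "proj_abc"  # hypothetical project ID

    # Assumed path backing the list/update endpoints named above.
    base = f"https://api.openai.com/v1/organization/projects/{PROJECT_ID}/rate_limits"

    # List the per-model rate limits configured for this project.
    limits = requests.get(base, headers=ADMIN_HEADERS).json().get("data", [])
    for rl in limits:
        print(rl["id"], rl["model"], rl.get("max_requests_per_1_minute"))

    # Lower the request ceiling for the first model returned.
    if limits:
        requests.post(f"{base}/{limits[0]['id']}",
                      headers=ADMIN_HEADERS,
                      json={"max_requests_per_1_minute": 500})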
For financial purposes, we recommend using the [Costs endpoint](/docs/api-reference/usage/costs) or the [Costs tab](/settings/organization/usage) in the Usage Dashboard, which will reconcile back to your billing invoice.\n" + navigationGroup: administration + sections: + - type: endpoint + key: usage-completions + path: completions + - type: object + key: UsageCompletionsResult + path: completions_object + - type: endpoint + key: usage-embeddings + path: embeddings + - type: object + key: UsageEmbeddingsResult + path: embeddings_object + - type: endpoint + key: usage-moderations + path: moderations + - type: object + key: UsageModerationsResult + path: moderations_object + - type: endpoint + key: usage-images + path: images + - type: object + key: UsageImagesResult + path: images_object + - type: endpoint + key: usage-audio-speeches + path: audio_speeches + - type: object + key: UsageAudioSpeechesResult + path: audio_speeches_object + - type: endpoint + key: usage-audio-transcriptions + path: audio_transcriptions + - type: object + key: UsageAudioTranscriptionsResult + path: audio_transcriptions_object + - type: endpoint + key: usage-vector-stores + path: vector_stores + - type: object + key: UsageVectorStoresResult + path: vector_stores_object + - type: endpoint + key: usage-code-interpreter-sessions + path: code_interpreter_sessions + - type: object + key: UsageCodeInterpreterSessionsResult + path: code_interpreter_sessions_object + - type: endpoint + key: usage-costs + path: costs + - type: object + key: CostsResult + path: costs_object + - id: realtime + title: Realtime + beta: true + description: "Communicate with a GPT-4o class model live, in real time, over WebSocket.\nProduces both audio and text transcriptions.\n[Learn more about the Realtime API](/docs/guides/realtime).\n" + navigationGroup: realtime + - id: realtime-client-events + title: Client events + description: "These are events that the OpenAI Realtime WebSocket server will accept from the client.\n" + navigationGroup: realtime + sections: + - type: object + key: RealtimeClientEventSessionUpdate + path: + - type: object + key: RealtimeClientEventInputAudioBufferAppend + path: + - type: object + key: RealtimeClientEventInputAudioBufferCommit + path: + - type: object + key: RealtimeClientEventInputAudioBufferClear + path: + - type: object + key: RealtimeClientEventConversationItemCreate + path: + - type: object + key: RealtimeClientEventConversationItemTruncate + path: + - type: object + key: RealtimeClientEventConversationItemDelete + path: + - type: object + key: RealtimeClientEventResponseCreate + path: + - type: object + key: RealtimeClientEventResponseCancel + path: + - id: realtime-server-events + title: Server events + description: "These are events emitted from the OpenAI Realtime WebSocket server to the client.\n" + navigationGroup: realtime + sections: + - type: object + key: RealtimeServerEventError + path: + - type: object + key: RealtimeServerEventSessionCreated + path: + - type: object + key: RealtimeServerEventSessionUpdated + path: + - type: object + key: RealtimeServerEventConversationCreated + path: + - type: object + key: RealtimeServerEventConversationItemCreated + path: + - type: object + key: RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + path: + - type: object + key: RealtimeServerEventConversationItemInputAudioTranscriptionFailed + path: + - type: object + key: RealtimeServerEventConversationItemTruncated + path: + - type: object + key: 
RealtimeServerEventConversationItemDeleted + path: + - type: object + key: RealtimeServerEventInputAudioBufferCommitted + path: + - type: object + key: RealtimeServerEventInputAudioBufferCleared + path: + - type: object + key: RealtimeServerEventInputAudioBufferSpeechStarted + path: + - type: object + key: RealtimeServerEventInputAudioBufferSpeechStopped + path: + - type: object + key: RealtimeServerEventResponseCreated + path: + - type: object + key: RealtimeServerEventResponseDone + path: + - type: object + key: RealtimeServerEventResponseOutputItemAdded + path: + - type: object + key: RealtimeServerEventResponseOutputItemDone + path: + - type: object + key: RealtimeServerEventResponseContentPartAdded + path: + - type: object + key: RealtimeServerEventResponseContentPartDone + path: + - type: object + key: RealtimeServerEventResponseTextDelta + path: + - type: object + key: RealtimeServerEventResponseTextDone + path: + - type: object + key: RealtimeServerEventResponseAudioTranscriptDelta + path: + - type: object + key: RealtimeServerEventResponseAudioTranscriptDone + path: + - type: object + key: RealtimeServerEventResponseAudioDelta + path: + - type: object + key: RealtimeServerEventResponseAudioDone + path: + - type: object + key: RealtimeServerEventResponseFunctionCallArgumentsDelta + path: + - type: object + key: RealtimeServerEventResponseFunctionCallArgumentsDone + path: + - type: object + key: RealtimeServerEventRateLimitsUpdated + path: - id: completions title: Completions legacy: true navigationGroup: legacy - description: "Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developer should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models.\n" + description: "Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. 
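The Realtime groups above enumerate the client and server event schemas; the wire protocol they imply is sketched below under stated assumptions. The WebSocket URL `wss://api.openai.com/v1/realtime`, the `OpenAI-Beta: realtime=v1` header, the placeholder model name, and the payload shapes inside each event are assumptions; only the event type names follow the schema keys listed above. The third-party `websocket-client` package is used for brevity.

    import json
    import os
    import websocket  # third-party package: websocket-client

    # Assumed endpoint and headers; the model name is a placeholder.
    url = "wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview"
    ws = websocket.create_connection(url, header=[
        f"Authorization: Bearer {os.environ['OPENAI_API_KEY']}",
        "OpenAI-Beta: realtime=v1",
    ])

    # Client event (RealtimeClientEventSessionUpdate): switch to text-only output.
    ws.send(json.dumps({
        "type": "session.update",
        "session": {"modalities": ["text"], "instructions": "Answer briefly."},
    }))

    # Client events: add a user message, then ask the model to respond.
    ws.send(json.dumps({
        "type": "conversation.item.create",
        "item": {"type": "message", "role": "user",
                 "content": [{"type": "input_text", "text": "Hello!"}]},
    }))
    ws.send(json.dumps({"type": "response.create"}))

    # Server events arrive as JSON frames; stream text deltas until response.done.
    while True:
        event = json.loads(ws.recv())
        if event["type"] == "response.text.delta":
            print(event["delta"], end="", flush=True)
        elif event["type"] in ("response.done", "error"):
            break
    ws.close()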
Most developer should use our [Chat Completions API](/docs/guides/text-generation#text-generation-models) to leverage our best and newest models.\n" sections: - type: endpoint key: createCompletion diff --git a/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/ValidationTests.Validation_NewWarnings.verified.txt b/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/ValidationTests.Validation_NewWarnings.verified.txt index cc16fe346a..ad47dbb93f 100644 --- a/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/ValidationTests.Validation_NewWarnings.verified.txt +++ b/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/ValidationTests.Validation_NewWarnings.verified.txt @@ -1,7 +1 @@ -[ - { - RuleName: SchemaMismatchedDataType, - Message: Data and type mismatch found., - Pointer: #/components/schemas/CreateCompletionRequest/properties/logprobs/default - } -] \ No newline at end of file +[] \ No newline at end of file diff --git a/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/ValidationTests.Validation_OriginalWarnings.verified.txt b/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/ValidationTests.Validation_OriginalWarnings.verified.txt index cc16fe346a..ad47dbb93f 100644 --- a/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/ValidationTests.Validation_OriginalWarnings.verified.txt +++ b/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/ValidationTests.Validation_OriginalWarnings.verified.txt @@ -1,7 +1 @@ -[ - { - RuleName: SchemaMismatchedDataType, - Message: Data and type mismatch found., - Pointer: #/components/schemas/CreateCompletionRequest/properties/logprobs/default - } -] \ No newline at end of file +[] \ No newline at end of file diff --git a/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/_.verified.txt b/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/_.verified.txt index efd3f5c7a1..ba2d224780 100644 --- a/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/_.verified.txt +++ b/src/tests/AutoSDK.UnitTests/Snapshots/Validation/ConvertedSpecs/openai.yaml/_.verified.txt @@ -1,11314 +1,24907 @@ -openapi: 3.0.1 +openapi: 3.0.0 info: - title: OpenAI API - description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. - termsOfService: https://openai.com/policies/terms-of-use - contact: - name: OpenAI Support - url: https://help.openai.com/ - license: - name: MIT - url: https://github.com/openai/openai-openapi/blob/master/LICENSE - version: '2.3.0' + title: OpenAI API + description: The OpenAI REST API. Please see + https://platform.openai.com/docs/api-reference for more details. + version: 2.3.0 + termsOfService: https://openai.com/policies/terms-of-use + contact: + name: OpenAI Support + url: https://help.openai.com/ + license: + name: MIT + url: https://github.com/openai/openai-openapi/blob/master/LICENSE servers: - - url: https://api.openai.com/v1 -paths: - /chat/completions: - post: - tags: - - Chat - summary: Creates a model response for the given chat conversation. 
- operationId: createChatCompletion - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateChatCompletionRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateChatCompletionResponse' - x-oaiMeta: - name: Create chat completion - group: chat - returns: "Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed.\n" - path: create - examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ]\n)\n\nprint(completion.choices[0].message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"system\", content: \"You are a helpful assistant.\" }],\n model: \"VAR_model_id\",\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - - title: Image input - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"What'\\''s in this image?\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n }\n }\n ]\n }\n ],\n \"max_tokens\": 300\n }'\n" - python: "from openai import OpenAI\n\nclient = OpenAI()\n\nresponse = client.chat.completions.create(\n model=\"gpt-4o\",\n messages=[\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What's in this image?\"},\n {\n \"type\": \"image_url\",\n \"image_url\": \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n ],\n }\n ],\n max_tokens=300,\n)\n\nprint(response.choices[0])\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: [\n {\n role: \"user\",\n content: [\n { type: \"text\", text: \"What's in this 
image?\" },\n {\n type: \"image_url\",\n image_url:\n \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\",\n },\n ],\n },\n ],\n });\n console.log(response.choices[0]);\n}\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream=True\n)\n\nfor chunk in completion:\n print(chunk.choices[0].delta)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n model: \"VAR_model_id\",\n messages: [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n stream: true,\n });\n\n for await (const chunk of completion) {\n console.log(chunk.choices[0].delta.content);\n }\n}\n\nmain();" - response: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - - title: Functions - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-d '{\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"What'\\''s the weather like in Boston today?\"\n }\n ],\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"tool_choice\": \"auto\"\n}'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\nmessages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}]\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=messages,\n tools=tools,\n tool_choice=\"auto\"\n)\n\nprint(completion)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messages = [{\"role\": \"user\", \"content\": \"What's the weather like in Boston today?\"}];\n const tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n ];\n\n const response = await openai.chat.completions.create({\n model: \"gpt-4o\",\n messages: messages,\n tools: tools,\n tool_choice: \"auto\",\n });\n\n console.log(response);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99\n }\n}\n" - - title: Logprobs - request: - curl: "curl https://api.openai.com/v1/chat/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Hello!\"\n }\n ],\n \"logprobs\": true,\n \"top_logprobs\": 2\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncompletion = client.chat.completions.create(\n model=\"VAR_model_id\",\n messages=[\n {\"role\": \"user\", \"content\": \"Hello!\"}\n ],\n logprobs=True,\n top_logprobs=2\n)\n\nprint(completion.choices[0].message)\nprint(completion.choices[0].logprobs)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.chat.completions.create({\n messages: [{ role: \"user\", content: \"Hello!\" }],\n model: \"VAR_model_id\",\n logprobs: true,\n top_logprobs: 2,\n });\n\n console.log(completion.choices[0]);\n}\n\nmain();" - response: "{\n \"id\": \"chatcmpl-123\",\n \"object\": 
\"chat.completion\",\n \"created\": 1702685778,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"Hello! How can I assist you today?\"\n },\n \"logprobs\": {\n \"content\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111],\n \"top_logprobs\": [\n {\n \"token\": \"Hello\",\n \"logprob\": -0.31725305,\n \"bytes\": [72, 101, 108, 108, 111]\n },\n {\n \"token\": \"Hi\",\n \"logprob\": -1.3190403,\n \"bytes\": [72, 105]\n }\n ]\n },\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [\n 33\n ],\n \"top_logprobs\": [\n {\n \"token\": \"!\",\n \"logprob\": -0.02380986,\n \"bytes\": [33]\n },\n {\n \"token\": \" there\",\n \"logprob\": -3.787621,\n \"bytes\": [32, 116, 104, 101, 114, 101]\n }\n ]\n },\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119],\n \"top_logprobs\": [\n {\n \"token\": \" How\",\n \"logprob\": -0.000054669687,\n \"bytes\": [32, 72, 111, 119]\n },\n {\n \"token\": \"<|end|>\",\n \"logprob\": -10.953937,\n \"bytes\": null\n }\n ]\n },\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110],\n \"top_logprobs\": [\n {\n \"token\": \" can\",\n \"logprob\": -0.015801601,\n \"bytes\": [32, 99, 97, 110]\n },\n {\n \"token\": \" may\",\n \"logprob\": -4.161023,\n \"bytes\": [32, 109, 97, 121]\n }\n ]\n },\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [\n 32,\n 73\n ],\n \"top_logprobs\": [\n {\n \"token\": \" I\",\n \"logprob\": -3.7697225e-6,\n \"bytes\": [32, 73]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -13.596657,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n }\n ]\n },\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116],\n \"top_logprobs\": [\n {\n \"token\": \" assist\",\n \"logprob\": -0.04571125,\n \"bytes\": [32, 97, 115, 115, 105, 115, 116]\n },\n {\n \"token\": \" help\",\n \"logprob\": -3.1089056,\n \"bytes\": [32, 104, 101, 108, 112]\n }\n ]\n },\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117],\n \"top_logprobs\": [\n {\n \"token\": \" you\",\n \"logprob\": -5.4385737e-6,\n \"bytes\": [32, 121, 111, 117]\n },\n {\n \"token\": \" today\",\n \"logprob\": -12.807695,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n }\n ]\n },\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121],\n \"top_logprobs\": [\n {\n \"token\": \" today\",\n \"logprob\": -0.0040071653,\n \"bytes\": [32, 116, 111, 100, 97, 121]\n },\n {\n \"token\": \"?\",\n \"logprob\": -5.5247097,\n \"bytes\": [63]\n }\n ]\n },\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63],\n \"top_logprobs\": [\n {\n \"token\": \"?\",\n \"logprob\": -0.0008108172,\n \"bytes\": [63]\n },\n {\n \"token\": \"?\\n\",\n \"logprob\": -7.184561,\n \"bytes\": [63, 10]\n }\n ]\n }\n ]\n },\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 9,\n \"total_tokens\": 18\n },\n \"system_fingerprint\": null\n}\n" - /completions: - post: - tags: - - Completions - summary: Creates a completion for the provided prompt and parameters. 
- operationId: createCompletion - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateCompletionRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateCompletionResponse' - x-oaiMeta: - name: Create completion - group: completions - returns: "Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed.\n" - legacy: true - examples: - - title: No streaming - request: - curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const completion = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n max_tokens: 7,\n temperature: 0,\n });\n\n console.log(completion);\n}\nmain();" - response: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"VAR_model_id\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/completions \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"VAR_model_id\",\n \"prompt\": \"Say this is a test\",\n \"max_tokens\": 7,\n \"temperature\": 0,\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nfor chunk in client.completions.create(\n model=\"VAR_model_id\",\n prompt=\"Say this is a test\",\n max_tokens=7,\n temperature=0,\n stream=True\n):\n print(chunk.choices[0].text)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.completions.create({\n model: \"VAR_model_id\",\n prompt: \"Say this is a test.\",\n stream: true,\n });\n\n for await (const chunk of stream) {\n console.log(chunk.choices[0].text)\n }\n}\nmain();" - response: "{\n \"id\": \"cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe\",\n \"object\": \"text_completion\",\n \"created\": 1690759702,\n \"choices\": [\n {\n \"text\": \"This\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": null\n }\n ],\n \"model\": \"gpt-3.5-turbo-instruct\"\n \"system_fingerprint\": \"fp_44709d6fcb\",\n}\n" - /images/generations: - post: - tags: - - Images - summary: Creates an image given a prompt. - operationId: createImage - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateImageRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - x-oaiMeta: - name: Create image - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/images/generations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"model\": \"dall-e-3\",\n \"prompt\": \"A cute baby sea otter\",\n \"n\": 1,\n \"size\": \"1024x1024\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.generate(\n model=\"dall-e-3\",\n prompt=\"A cute baby sea otter\",\n n=1,\n size=\"1024x1024\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.generate({ model: \"dall-e-3\", prompt: \"A cute baby sea otter\" });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /images/edits: - post: - tags: - - Images - summary: Creates an edited or extended image given an original image and a prompt. - operationId: createImageEdit - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateImageEditRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - x-oaiMeta: - name: Create image edit - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/images/edits \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F mask=\"@mask.png\" \\\n -F prompt=\"A cute baby sea otter wearing a beret\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.images.edit(\n image=open(\"otter.png\", \"rb\"),\n mask=open(\"mask.png\", \"rb\"),\n prompt=\"A cute baby sea otter wearing a beret\",\n n=2,\n size=\"1024x1024\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.edit({\n image: fs.createReadStream(\"otter.png\"),\n mask: fs.createReadStream(\"mask.png\"),\n prompt: \"A cute baby sea otter wearing a beret\",\n });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /images/variations: - post: - tags: - - Images - summary: Creates a variation of a given image. - operationId: createImageVariation - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateImageVariationRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ImagesResponse' - x-oaiMeta: - name: Create image variation - group: images - returns: 'Returns a list of [image](/docs/api-reference/images/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/images/variations \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F image=\"@otter.png\" \\\n -F n=2 \\\n -F size=\"1024x1024\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.images.create_variation(\n image=open(\"image_edit_original.png\", \"rb\"),\n n=2,\n size=\"1024x1024\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const image = await openai.images.createVariation({\n image: fs.createReadStream(\"otter.png\"),\n });\n\n console.log(image.data);\n}\nmain();" - response: "{\n \"created\": 1589478378,\n \"data\": [\n {\n \"url\": \"https://...\"\n },\n {\n \"url\": \"https://...\"\n }\n ]\n}\n" - /embeddings: - post: - tags: - - Embeddings - summary: Creates an embedding vector representing the input text. - operationId: createEmbedding - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEmbeddingRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateEmbeddingResponse' - x-oaiMeta: - name: Create embeddings - group: embeddings - returns: 'A list of [embedding](/docs/api-reference/embeddings/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/embeddings \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input\": \"The food was delicious and the waiter...\",\n \"model\": \"text-embedding-ada-002\",\n \"encoding_format\": \"float\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.embeddings.create(\n model=\"text-embedding-ada-002\",\n input=\"The food was delicious and the waiter...\",\n encoding_format=\"float\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const embedding = await openai.embeddings.create({\n model: \"text-embedding-ada-002\",\n input: \"The quick brown fox jumped over the lazy dog\",\n encoding_format: \"float\",\n });\n\n console.log(embedding);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... (1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n }\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\n \"prompt_tokens\": 8,\n \"total_tokens\": 8\n }\n}\n" - /audio/speech: - post: - tags: - - Audio - summary: Generates audio from the input text. - operationId: createSpeech - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateSpeechRequest' - required: true - responses: - '200': - description: OK - headers: - Transfer-Encoding: - description: chunked - schema: - type: string - content: - application/octet-stream: - schema: - type: string - format: binary - x-oaiMeta: - name: Create speech - group: audio - returns: The audio file content. 
- examples: - request: - curl: "curl https://api.openai.com/v1/audio/speech \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"model\": \"tts-1\",\n \"input\": \"The quick brown fox jumped over the lazy dog.\",\n \"voice\": \"alloy\"\n }' \\\n --output speech.mp3\n" - python: "from pathlib import Path\nimport openai\n\nspeech_file_path = Path(__file__).parent / \"speech.mp3\"\nresponse = openai.audio.speech.create(\n model=\"tts-1\",\n voice=\"alloy\",\n input=\"The quick brown fox jumped over the lazy dog.\"\n)\nresponse.stream_to_file(speech_file_path)\n" - node: "import fs from \"fs\";\nimport path from \"path\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst speechFile = path.resolve(\"./speech.mp3\");\n\nasync function main() {\n const mp3 = await openai.audio.speech.create({\n model: \"tts-1\",\n voice: \"alloy\",\n input: \"Today is a wonderful day to build something people love!\",\n });\n console.log(speechFile);\n const buffer = Buffer.from(await mp3.arrayBuffer());\n await fs.promises.writeFile(speechFile, buffer);\n}\nmain();\n" - /audio/transcriptions: - post: - tags: - - Audio - summary: Transcribes audio into the input language. - operationId: createTranscription - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateTranscriptionRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/CreateTranscriptionResponseJson' - - $ref: '#/components/schemas/CreateTranscriptionResponseVerboseJson' - x-oaiMeta: - name: Create transcription - group: audio - returns: 'The [transcription object](/docs/api-reference/audio/json-object) or a [verbose transcription object](/docs/api-reference/audio/verbose-json-object).' - examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/audio/transcriptions \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: multipart/form-data\" \\\n -F file=\"@/path/to/file/audio.mp3\" \\\n -F model=\"whisper-1\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.transcriptions.create(\n model=\"whisper-1\",\n file=audio_file\n)\n" - node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const transcription = await openai.audio.transcriptions.create({\n file: fs.createReadStream(\"audio.mp3\"),\n model: \"whisper-1\",\n });\n\n console.log(transcription.text);\n}\nmain();\n" - response: "{\n \"text\": \"Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. 
This is a place where you can get to do that.\"\n}\n" - - title: Word timestamps - request: - curl: "curl https://api.openai.com/v1/audio/transcriptions \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: multipart/form-data\" \\\n -F file=\"@/path/to/file/audio.mp3\" \\\n -F \"timestamp_granularities[]=word\" \\\n -F model=\"whisper-1\" \\\n -F response_format=\"verbose_json\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.transcriptions.create(\n file=audio_file,\n model=\"whisper-1\",\n response_format=\"verbose_json\",\n timestamp_granularities=[\"word\"]\n)\n\nprint(transcript.words)\n" - node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const transcription = await openai.audio.transcriptions.create({\n file: fs.createReadStream(\"audio.mp3\"),\n model: \"whisper-1\",\n response_format: \"verbose_json\",\n timestamp_granularities: [\"word\"]\n });\n\n console.log(transcription.text);\n}\nmain();\n" - response: "{\n \"task\": \"transcribe\",\n \"language\": \"english\",\n \"duration\": 8.470000267028809,\n \"text\": \"The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.\",\n \"words\": [\n {\n \"word\": \"The\",\n \"start\": 0.0,\n \"end\": 0.23999999463558197\n },\n ...\n {\n \"word\": \"volleyball\",\n \"start\": 7.400000095367432,\n \"end\": 7.900000095367432\n }\n ]\n}\n" - - title: Segment timestamps - request: - curl: "curl https://api.openai.com/v1/audio/transcriptions \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: multipart/form-data\" \\\n -F file=\"@/path/to/file/audio.mp3\" \\\n -F \"timestamp_granularities[]=segment\" \\\n -F model=\"whisper-1\" \\\n -F response_format=\"verbose_json\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.transcriptions.create(\n file=audio_file,\n model=\"whisper-1\",\n response_format=\"verbose_json\",\n timestamp_granularities=[\"segment\"]\n)\n\nprint(transcript.words)\n" - node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const transcription = await openai.audio.transcriptions.create({\n file: fs.createReadStream(\"audio.mp3\"),\n model: \"whisper-1\",\n response_format: \"verbose_json\",\n timestamp_granularities: [\"segment\"]\n });\n\n console.log(transcription.text);\n}\nmain();\n" - response: "{\n \"task\": \"transcribe\",\n \"language\": \"english\",\n \"duration\": 8.470000267028809,\n \"text\": \"The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.\",\n \"segments\": [\n {\n \"id\": 0,\n \"seek\": 0,\n \"start\": 0.0,\n \"end\": 3.319999933242798,\n \"text\": \" The beach was a popular spot on a hot summer day.\",\n \"tokens\": [\n 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530\n ],\n \"temperature\": 0.0,\n \"avg_logprob\": -0.2860786020755768,\n \"compression_ratio\": 1.2363636493682861,\n \"no_speech_prob\": 0.00985979475080967\n },\n ...\n ]\n}\n" - /audio/translations: - post: - tags: - - Audio - summary: Translates audio into English. 
- operationId: createTranslation - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateTranslationRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/CreateTranslationResponseJson' - - $ref: '#/components/schemas/CreateTranslationResponseVerboseJson' - x-oaiMeta: - name: Create translation - group: audio - returns: The translated text. - examples: - request: - curl: "curl https://api.openai.com/v1/audio/translations \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: multipart/form-data\" \\\n -F file=\"@/path/to/file/german.m4a\" \\\n -F model=\"whisper-1\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\naudio_file = open(\"speech.mp3\", \"rb\")\ntranscript = client.audio.translations.create(\n model=\"whisper-1\",\n file=audio_file\n)\n" - node: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const translation = await openai.audio.translations.create({\n file: fs.createReadStream(\"speech.mp3\"),\n model: \"whisper-1\",\n });\n\n console.log(translation.text);\n}\nmain();\n" - response: "{\n \"text\": \"Hello, my name is Wolfgang and I come from Germany. Where are you heading today?\"\n}\n" - /files: - get: - tags: - - Files - summary: Returns a list of files that belong to the user's organization. - operationId: listFiles - parameters: - - name: purpose - in: query - description: Only return files with the given purpose. - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFilesResponse' - x-oaiMeta: - name: List files - group: files - returns: 'A list of [File](/docs/api-reference/files/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.list()\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.files.list();\n\n for await (const file of list) {\n console.log(file);\n }\n}\n\nmain();" - response: "{\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 175,\n \"created_at\": 1613677385,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\",\n },\n {\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 140,\n \"created_at\": 1613779121,\n \"filename\": \"puppy.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n ],\n \"object\": \"list\"\n}\n" - post: - tags: - - Files - summary: "Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.\n\nThe Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details.\n\nThe Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models.\n\nThe Batch API only supports `.jsonl` files up to 100 MB in size. 
The input also has a specific required [format](/docs/api-reference/batch/request-input).\n\nPlease [contact us](https://help.openai.com/) if you need to increase these storage limits.\n" - operationId: createFile - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/CreateFileRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIFile' - x-oaiMeta: - name: Upload file - group: files - returns: 'The uploaded [File](/docs/api-reference/files/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -F purpose=\"fine-tune\" \\\n -F file=\"@mydata.jsonl\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.create(\n file=open(\"mydata.jsonl\", \"rb\"),\n purpose=\"fine-tune\"\n)\n" - node.js: "import fs from \"fs\";\nimport OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.create({\n file: fs.createReadStream(\"mydata.jsonl\"),\n purpose: \"fine-tune\",\n });\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" - '/files/{file_id}': - delete: - tags: - - Files - summary: Delete a file. - operationId: deleteFile - parameters: - - name: file_id - in: path - description: The ID of the file to use for this request. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteFileResponse' - x-oaiMeta: - name: Delete file - group: files - returns: Deletion status. - examples: - request: - curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -X DELETE \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.delete(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.del(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"deleted\": true\n}\n" - get: - tags: - - Files - summary: Returns information about a specific file. - operationId: retrieveFile - parameters: - - name: file_id - in: path - description: The ID of the file to use for this request. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIFile' - x-oaiMeta: - name: Retrieve file - group: files - returns: 'The [File](/docs/api-reference/files/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.files.retrieve(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.retrieve(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"mydata.jsonl\",\n \"purpose\": \"fine-tune\",\n}\n" - '/files/{file_id}/content': - get: - tags: - - Files - summary: Returns the contents of the specified file. - operationId: downloadFile - parameters: - - name: file_id - in: path - description: The ID of the file to use for this request. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - type: string - x-oaiMeta: - name: Retrieve file content - group: files - returns: The file content. - examples: - request: - curl: "curl https://api.openai.com/v1/files/file-abc123/content \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" > file.jsonl\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ncontent = client.files.content(\"file-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const file = await openai.files.content(\"file-abc123\");\n\n console.log(file);\n}\n\nmain();\n" - /uploads: - post: - tags: - - Uploads - summary: "Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it.\n\nOnce you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object.\n\nFor certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case:\n- [Assistants](/docs/assistants/tools/file-search/supported-files)\n\nFor guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).\n" - operationId: createUpload - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateUploadRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Upload' - x-oaiMeta: - name: Create upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `pending`.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/uploads \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"purpose\": \"fine-tune\",\n \"filename\": \"training_examples.jsonl\",\n \"bytes\": 2147483648,\n \"mime_type\": \"text/jsonl\"\n }'\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"pending\",\n \"expires_at\": 1719127296\n}\n" - '/uploads/{upload_id}/parts': - post: - tags: - - Uploads - summary: "Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. \n\nEach Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB.\n\nIt is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete).\n" - operationId: addUploadPart - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 - requestBody: - content: - multipart/form-data: - schema: - $ref: '#/components/schemas/AddUploadPartRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/UploadPart' - x-oaiMeta: - name: Add upload part - group: uploads - returns: 'The upload [Part](/docs/api-reference/uploads/part-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/parts\n -F data=\"aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz...\"\n" - response: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719185911,\n \"upload_id\": \"upload_abc123\"\n}\n" - '/uploads/{upload_id}/complete': - post: - tags: - - Uploads - summary: "Completes the [Upload](/docs/api-reference/uploads/object). \n\nWithin the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform.\n\nYou can specify the order of the Parts by passing in an ordered list of the Part IDs.\n\nThe number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed.\n" - operationId: completeUpload - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CompleteUploadRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Upload' - x-oaiMeta: - name: Complete upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `completed` with an additional `file` property containing the created usable File object.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/complete\n -d '{\n \"part_ids\": [\"part_def456\", \"part_ghi789\"]\n }'\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n }\n}\n" - '/uploads/{upload_id}/cancel': - post: - tags: - - Uploads - summary: "Cancels the Upload. No Parts may be added after an Upload is cancelled.\n" - operationId: cancelUpload - parameters: - - name: upload_id - in: path - description: "The ID of the Upload.\n" - required: true - schema: - type: string - example: upload_abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Upload' - x-oaiMeta: - name: Cancel upload - group: uploads - returns: 'The [Upload](/docs/api-reference/uploads/object) object with status `cancelled`.' - examples: - request: - curl: "curl https://api.openai.com/v1/uploads/upload_abc123/cancel\n" - response: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"cancelled\",\n \"expires_at\": 1719127296\n}\n" - /fine_tuning/jobs: - post: - tags: - - Fine-tuning - summary: "Creates a fine-tuning job which begins the process of creating a new model from a given dataset.\n\nResponse includes details of the enqueued job including job status and the name of the fine-tuned models once complete.\n\n[Learn more about fine-tuning](/docs/guides/fine-tuning)\n" - operationId: createFineTuningJob - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateFineTuningJobRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - x-oaiMeta: - name: Create fine-tuning job - group: fine-tuning - returns: 'A [fine-tuning.job](/docs/api-reference/fine-tuning/object) object.' 
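The three Upload endpoints above describe a chunked-upload protocol: reserve an Upload with the total byte count, send Parts of at most 64 MB each, then complete it with the Part IDs in the intended order. Below is a minimal sketch of that flow against the raw REST endpoints using the `requests` library; the local file path, chunk-size handling, and lack of error handling are illustrative assumptions, not part of the spec.

import os
import requests

API = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}
PART_SIZE = 64 * 1024 * 1024  # the spec caps each Part at 64 MB

path = "training_examples.jsonl"  # illustrative local file
total_bytes = os.path.getsize(path)

# 1. Create the Upload, declaring size, purpose, and MIME type up front.
upload = requests.post(
    f"{API}/uploads",
    headers=HEADERS,
    json={
        "purpose": "fine-tune",
        "filename": os.path.basename(path),
        "bytes": total_bytes,
        "mime_type": "text/jsonl",
    },
).json()

# 2. Send the file as Parts via the multipart "data" field.
part_ids = []
with open(path, "rb") as f:
    while chunk := f.read(PART_SIZE):
        part = requests.post(
            f"{API}/uploads/{upload['id']}/parts",
            headers=HEADERS,
            files={"data": chunk},
        ).json()
        part_ids.append(part["id"])

# 3. Complete the Upload with the Part IDs in the intended order.
done = requests.post(
    f"{API}/uploads/{upload['id']}/complete",
    headers=HEADERS,
    json={"part_ids": part_ids},
).json()
print(done["status"], done["file"]["id"])  # the nested File is now usable platform-wide

Because Part order is only fixed at completion time, the sequential loop in step 2 could be replaced with parallel uploads without changing steps 1 or 3.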
- examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-BK7bzQj3FfZFXr7DbL6xJwfo\",\n \"model\": \"gpt-4o-mini\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n model=\"gpt-4o-mini\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\"\n });\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n}\n" - - title: Epochs - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\",\n \"hyperparameters\": {\n \"n_epochs\": 2\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n model=\"gpt-4o-mini\",\n hyperparameters={\n \"n_epochs\":2\n }\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\",\n model: \"gpt-4o-mini\",\n hyperparameters: { n_epochs: 2 }\n });\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\"n_epochs\": 2},\n}\n" - - title: Validation file - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"training_file\": \"file-abc123\",\n \"validation_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.create(\n training_file=\"file-abc123\",\n validation_file=\"file-def456\",\n model=\"gpt-4o-mini\"\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.create({\n training_file: \"file-abc123\",\n validation_file: \"file-abc123\"\n });\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n}\n" - - title: W&B Integration - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n 
\"training_file\": \"file-abc123\",\n \"validation_file\": \"file-abc123\",\n \"model\": \"gpt-4o-mini\",\n \"integrations\": [\n {\n \"type\": \"wandb\",\n \"wandb\": {\n \"project\": \"my-wandb-project\",\n \"name\": \"ft-run-display-name\"\n \"tags\": [\n \"first-experiment\", \"v2\"\n ]\n }\n }\n ]\n }'\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"status\": \"queued\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\",\n \"integrations\": [\n {\n \"type\": \"wandb\",\n \"wandb\": {\n \"project\": \"my-wandb-project\",\n \"entity\": None,\n \"run_id\": \"ftjob-abc123\"\n }\n }\n ]\n}\n" - get: - tags: - - Fine-tuning - summary: "List your organization's fine-tuning jobs\n" - operationId: listPaginatedFineTuningJobs - parameters: - - name: after - in: query - description: Identifier for the last job from the previous pagination request. - schema: - type: string - - name: limit - in: query - description: Number of fine-tuning jobs to retrieve. - schema: - type: integer - default: 20 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListPaginatedFineTuningJobsResponse' - x-oaiMeta: - name: List fine-tuning jobs - group: fine-tuning - returns: 'A list of paginated [fine-tuning job](/docs/api-reference/fine-tuning/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.list()\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.fineTuning.jobs.list();\n\n for await (const fineTune of list) {\n console.log(fineTune);\n }\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-TjX0lMfOniCZX64t9PUQT5hn\",\n \"created_at\": 1689813489,\n \"level\": \"warn\",\n \"message\": \"Fine tuning process stopping due to job cancellation\",\n \"data\": null,\n \"type\": \"message\"\n },\n { ... },\n { ... }\n ], \"has_more\": true\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}': - get: - tags: - - Fine-tuning - summary: "Get info about a fine-tuning job.\n\n[Learn more about fine-tuning](/docs/guides/fine-tuning)\n" - operationId: retrieveFineTuningJob - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - x-oaiMeta: - name: Retrieve fine-tuning job - group: fine-tuning - returns: 'The [fine-tuning](/docs/api-reference/fine-tuning/object) object with the given ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.retrieve(\"ftjob-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.retrieve(\"ftjob-abc123\");\n\n console.log(fineTune);\n}\n\nmain();\n" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/events': - get: - tags: - - Fine-tuning - summary: "Get status updates for a fine-tuning job.\n" - operationId: listFineTuningEvents - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job to get events for.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - - name: after - in: query - description: Identifier for the last event from the previous pagination request. - schema: - type: string - - name: limit - in: query - description: Number of events to retrieve. - schema: - type: integer - default: 20 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuningJobEventsResponse' - x-oaiMeta: - name: List fine-tuning events - group: fine-tuning - returns: A list of fine-tuning event objects. 
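The create and retrieve calls above are usually wrapped in a short polling loop: start the job, re-fetch it until it reaches a terminal state, then read the fine-tuned model name. A sketch using the SDK calls already shown in this section; the training file ID, the 30-second interval, and the exact set of terminal statuses are assumptions (the examples here only show `queued`, `succeeded`, and `cancelled`).

import time
from openai import OpenAI

client = OpenAI()

# Start the job, as in the create example above.
job = client.fine_tuning.jobs.create(
    training_file="file-abc123",  # an already-uploaded JSONL file
    model="gpt-4o-mini",
)

# Poll until the job leaves the queue; terminal statuses assumed: succeeded/failed/cancelled.
while job.status not in ("succeeded", "failed", "cancelled"):
    time.sleep(30)
    job = client.fine_tuning.jobs.retrieve(job.id)

# Recent status updates, using the list-events call shown below.
for event in client.fine_tuning.jobs.list_events(fine_tuning_job_id=job.id, limit=5):
    print(event.created_at, event.level, event.message)

if job.status == "succeeded":
    print("fine-tuned model:", job.fine_tuned_model)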
- examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.list_events(\n fine_tuning_job_id=\"ftjob-abc123\",\n limit=2\n)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.fineTuning.list_events(id=\"ftjob-abc123\", limit=2);\n\n for await (const fineTune of list) {\n console.log(fineTune);\n }\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-ddTJfwuMVpfLXseO0Am0Gqjm\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"Fine tuning job successfully completed\",\n \"data\": null,\n \"type\": \"message\"\n },\n {\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ft-event-tyiGuB72evQncpH87xe505Sv\",\n \"created_at\": 1721764800,\n \"level\": \"info\",\n \"message\": \"New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel\",\n \"data\": null,\n \"type\": \"message\"\n }\n ],\n \"has_more\": true\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/cancel': - post: - tags: - - Fine-tuning - summary: "Immediately cancel a fine-tune job.\n" - operationId: cancelFineTuningJob - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job to cancel.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/FineTuningJob' - x-oaiMeta: - name: Cancel fine-tuning - group: fine-tuning - returns: 'The cancelled [fine-tuning](/docs/api-reference/fine-tuning/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.fine_tuning.jobs.cancel(\"ftjob-abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const fineTune = await openai.fineTuning.jobs.cancel(\"ftjob-abc123\");\n\n console.log(fineTune);\n}\nmain();" - response: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"created_at\": 1721764800,\n \"fine_tuned_model\": null,\n \"organization_id\": \"org-123\",\n \"result_files\": [],\n \"hyperparameters\": {\n \"n_epochs\": \"auto\"\n },\n \"status\": \"cancelled\",\n \"validation_file\": \"file-abc123\",\n \"training_file\": \"file-abc123\"\n}\n" - '/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints': - get: - tags: - - Fine-tuning - summary: "List checkpoints for a fine-tuning job.\n" - operationId: listFineTuningJobCheckpoints - parameters: - - name: fine_tuning_job_id - in: path - description: "The ID of the fine-tuning job to get checkpoints for.\n" - required: true - schema: - type: string - example: ft-AF1WoRqd3aJAHsqc9NY7iL8F - - name: after - in: query - description: Identifier for the last checkpoint ID from the previous pagination request. - schema: - type: string - - name: limit - in: query - description: Number of checkpoints to retrieve. 
- schema: - type: integer - default: 10 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFineTuningJobCheckpointsResponse' - x-oaiMeta: - name: List fine-tuning checkpoints - group: fine-tuning - returns: 'A list of fine-tuning [checkpoint objects](/docs/api-reference/fine-tuning/checkpoint-object) for a fine-tuning job.' - examples: - request: - curl: "curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/checkpoints \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - response: "{\n \"object\": \"list\"\n \"data\": [\n {\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"created_at\": 1721764867,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000\",\n \"metrics\": {\n \"full_valid_loss\": 0.134,\n \"full_valid_mean_token_accuracy\": 0.874\n },\n \"fine_tuning_job_id\": \"ftjob-abc123\",\n \"step_number\": 2000,\n },\n {\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_enQCFmOTGj3syEpYVhBRLTSy\",\n \"created_at\": 1721764800,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000\",\n \"metrics\": {\n \"full_valid_loss\": 0.167,\n \"full_valid_mean_token_accuracy\": 0.781\n },\n \"fine_tuning_job_id\": \"ftjob-abc123\",\n \"step_number\": 1000,\n },\n ],\n \"first_id\": \"ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"last_id\": \"ftckpt_enQCFmOTGj3syEpYVhBRLTSy\",\n \"has_more\": true\n}\n" - /models: - get: - tags: - - Models - summary: 'Lists the currently available models, and provides basic information about each one such as the owner and availability.' - operationId: listModels - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListModelsResponse' - x-oaiMeta: - name: List models - group: models - returns: 'A list of [model](/docs/api-reference/models/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/models \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.list()\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.models.list();\n\n for await (const model of list) {\n console.log(model);\n }\n}\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"model-id-0\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"organization-owner\"\n },\n {\n \"id\": \"model-id-1\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"organization-owner\",\n },\n {\n \"id\": \"model-id-2\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n },\n ],\n \"object\": \"list\"\n}\n" - '/models/{model}': - get: - tags: - - Models - summary: 'Retrieves a model instance, providing basic information about the model such as the owner and permissioning.' - operationId: retrieveModel - parameters: - - name: model - in: path - description: The ID of the model to use for this request - required: true - schema: - type: string - example: gpt-4o-mini - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Model' - x-oaiMeta: - name: Retrieve model - group: models - returns: 'The [model](/docs/api-reference/models/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/models/VAR_model_id \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.retrieve(\"VAR_model_id\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const model = await openai.models.retrieve(\"VAR_model_id\");\n\n console.log(model);\n}\n\nmain();" - response: "{\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" - delete: - tags: - - Models - summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. - operationId: deleteModel - parameters: - - name: model - in: path - description: The model to delete - required: true - schema: - type: string - example: ft:gpt-4o-mini:acemeco:suffix:abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteModelResponse' - x-oaiMeta: - name: Delete a fine-tuned model - group: models - returns: Deletion status. - examples: - request: - curl: "curl https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 \\\n -X DELETE \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.models.delete(\"ft:gpt-4o-mini:acemeco:suffix:abc123\")\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const model = await openai.models.del(\"ft:gpt-4o-mini:acemeco:suffix:abc123\");\n\n console.log(model);\n}\nmain();" - response: "{\n \"id\": \"ft:gpt-4o-mini:acemeco:suffix:abc123\",\n \"object\": \"model\",\n \"deleted\": true\n}\n" - /moderations: - post: - tags: - - Moderations - summary: Classifies if text is potentially harmful. - operationId: createModeration - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateModerationRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateModerationResponse' - x-oaiMeta: - name: Create moderation - group: moderations - returns: 'A [moderation](/docs/api-reference/moderations/object) object.' 
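The list, retrieve, and delete model endpoints above are often combined to audit or clean up fine-tuned models. In the examples, fine-tuned model IDs carry an `ft:` prefix (e.g. `ft:gpt-4o-mini:acemeco:suffix:abc123`), so a sketch like the following filters on that prefix; treat the prefix as a convention inferred from the example IDs rather than a documented guarantee, and remember that deletion requires the Owner role.

from openai import OpenAI

client = OpenAI()

# List everything visible to the organization and keep the fine-tuned models.
fine_tuned = [m for m in client.models.list() if m.id.startswith("ft:")]

for model in fine_tuned:
    print(model.id, "owned by", model.owned_by)
    # Uncomment to actually remove the model (requires the Owner role):
    # client.models.delete(model.id)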
- examples: - request: - curl: "curl https://api.openai.com/v1/moderations \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -d '{\n \"input\": \"I want to kill them.\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmoderation = client.moderations.create(input=\"I want to kill them.\")\nprint(moderation)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const moderation = await openai.moderations.create({ input: \"I want to kill them.\" });\n\n console.log(moderation);\n}\nmain();\n" - response: "{\n \"id\": \"modr-XXXXX\",\n \"model\": \"text-moderation-005\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"sexual\": false,\n \"hate\": false,\n \"harassment\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"hate/threatening\": false,\n \"violence/graphic\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"harassment/threatening\": true,\n \"violence\": true,\n },\n \"category_scores\": {\n \"sexual\": 1.2282071e-06,\n \"hate\": 0.010696256,\n \"harassment\": 0.29842457,\n \"self-harm\": 1.5236925e-08,\n \"sexual/minors\": 5.7246268e-08,\n \"hate/threatening\": 0.0060676364,\n \"violence/graphic\": 4.435014e-06,\n \"self-harm/intent\": 8.098441e-10,\n \"self-harm/instructions\": 2.8498655e-11,\n \"harassment/threatening\": 0.63055265,\n \"violence\": 0.99011886,\n }\n }\n ]\n}\n" - /assistants: - get: - tags: - - Assistants - summary: Returns a list of assistants. - operationId: listAssistants - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListAssistantsResponse' - x-oaiMeta: - name: List assistants - group: assistants - beta: true - returns: 'A list of [assistant](/docs/api-reference/assistants/object) objects.' 
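In practice the moderation response above is used as a gate: check `flagged` on the first result and only pass the input along when it is clean. A small sketch built on the same `client.moderations.create` call shown in the example; the surrounding function and print statements are illustrative.

from openai import OpenAI

client = OpenAI()

def is_allowed(text: str) -> bool:
    """Return False when the moderation endpoint flags the input as potentially harmful."""
    result = client.moderations.create(input=text).results[0]
    if result.flagged:
        # The per-category booleans and scores mirror the response shown above.
        print("blocked:", result.categories)
        return False
    return True

print(is_allowed("I want to kill them."))  # the example input above is expected to be flagged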
- examples: - request: - curl: "curl \"https://api.openai.com/v1/assistants?order=desc&limit=20\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistants = client.beta.assistants.list(\n order=\"desc\",\n limit=\"20\",\n)\nprint(my_assistants.data)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistants = await openai.beta.assistants.list({\n order: \"desc\",\n limit: \"20\",\n });\n\n console.log(myAssistants.data);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" - post: - tags: - - Assistants - summary: Create an assistant with a model and instructions. - operationId: createAssistant - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAssistantRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - x-oaiMeta: - name: Create assistant - group: assistants - beta: true - returns: 'An [assistant](/docs/api-reference/assistants/object) object.' - examples: - - title: Code Interpreter - request: - curl: "curl \"https://api.openai.com/v1/assistants\" \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"name\": \"Math Tutor\",\n \"tools\": [{\"type\": \"code_interpreter\"}],\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n name=\"Math Tutor\",\n tools=[{\"type\": \"code_interpreter\"}],\n model=\"gpt-4o\",\n)\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.\",\n name: \"Math Tutor\",\n tools: [{ type: \"code_interpreter\" }],\n model: \"gpt-4o\",\n });\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - - title: Files - request: - curl: "curl https://api.openai.com/v1/assistants \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"tool_resources\": {\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.create(\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n tool_resources={\"file_search\": {\"vector_store_ids\": [\"vs_123\"]}},\n model=\"gpt-4o\"\n)\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.create({\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n tool_resources: {\n file_search: {\n vector_store_ids: [\"vs_123\"]\n }\n },\n model: \"gpt-4o\"\n });\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009403,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - '/assistants/{assistant_id}': - get: - tags: - - Assistants - summary: Retrieves an assistant. - operationId: getAssistant - parameters: - - name: assistant_id - in: path - description: The ID of the assistant to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - x-oaiMeta: - name: Retrieve assistant - group: assistants - beta: true - returns: 'The [assistant](/docs/api-reference/assistants/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_assistant = client.beta.assistants.retrieve(\"asst_abc123\")\nprint(my_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myAssistant = await openai.beta.assistants.retrieve(\n \"asst_abc123\"\n );\n\n console.log(myAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - post: - tags: - - Assistants - summary: Modifies an assistant. - operationId: modifyAssistant - parameters: - - name: assistant_id - in: path - description: The ID of the assistant to modify. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyAssistantRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/AssistantObject' - x-oaiMeta: - name: Modify assistant - group: assistants - beta: true - returns: 'The modified [assistant](/docs/api-reference/assistants/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n \"tools\": [{\"type\": \"file_search\"}],\n \"model\": \"gpt-4o\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_assistant = client.beta.assistants.update(\n \"asst_abc123\",\n instructions=\"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name=\"HR Helper\",\n tools=[{\"type\": \"file_search\"}],\n model=\"gpt-4o\"\n)\n\nprint(my_updated_assistant)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myUpdatedAssistant = await openai.beta.assistants.update(\n \"asst_abc123\",\n {\n instructions:\n \"You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.\",\n name: \"HR Helper\",\n tools: [{ type: \"file_search\" }],\n model: \"gpt-4o\"\n }\n );\n\n console.log(myUpdatedAssistant);\n}\n\nmain();" - response: "{\n \"id\": \"asst_123\",\n \"object\": \"assistant\",\n \"created_at\": 1699009709,\n \"name\": \"HR Helper\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": []\n }\n },\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - delete: - tags: - - Assistants - summary: Delete an assistant. - operationId: deleteAssistant - parameters: - - name: assistant_id - in: path - description: The ID of the assistant to delete. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteAssistantResponse' - x-oaiMeta: - name: Delete assistant - group: assistants - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/assistants/asst_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.assistants.delete(\"asst_abc123\")\nprint(response)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.assistants.del(\"asst_abc123\");\n\n console.log(response);\n}\nmain();" - response: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant.deleted\",\n \"deleted\": true\n}\n" - /threads: - post: - tags: - - Assistants - summary: Create a thread. - operationId: createThread - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateThreadRequest' - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - x-oaiMeta: - name: Create thread - group: threads - beta: true - returns: 'A [thread](/docs/api-reference/threads) object.' - examples: - - title: Empty - request: - curl: "curl https://api.openai.com/v1/threads \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d ''\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nempty_thread = client.beta.threads.create()\nprint(empty_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const emptyThread = await openai.beta.threads.create();\n\n console.log(emptyThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699012949,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" - - title: Messages - request: - curl: "curl https://api.openai.com/v1/threads \\\n-H \"Content-Type: application/json\" \\\n-H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n-H \"OpenAI-Beta: assistants=v2\" \\\n-d '{\n \"messages\": [{\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n }, {\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage_thread = client.beta.threads.create(\n messages=[\n {\n \"role\": \"user\",\n \"content\": \"Hello, what is AI?\"\n },\n {\n \"role\": \"user\",\n \"content\": \"How does AI work? 
Explain it in simple terms.\"\n },\n ]\n)\n\nprint(message_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const messageThread = await openai.beta.threads.create({\n messages: [\n {\n role: \"user\",\n content: \"Hello, what is AI?\"\n },\n {\n role: \"user\",\n content: \"How does AI work? Explain it in simple terms.\",\n },\n ],\n });\n\n console.log(messageThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {}\n}\n" - '/threads/{thread_id}': - get: - tags: - - Assistants - summary: Retrieves a thread. - operationId: getThread - parameters: - - name: thread_id - in: path - description: The ID of the thread to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - x-oaiMeta: - name: Retrieve thread - group: threads - beta: true - returns: 'The [thread](/docs/api-reference/threads/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_thread = client.beta.threads.retrieve(\"thread_abc123\")\nprint(my_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const myThread = await openai.beta.threads.retrieve(\n \"thread_abc123\"\n );\n\n console.log(myThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {},\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": []\n }\n }\n}\n" - post: - tags: - - Assistants - summary: Modifies a thread. - operationId: modifyThread - parameters: - - name: thread_id - in: path - description: The ID of the thread to modify. Only the `metadata` can be modified. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyThreadRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ThreadObject' - x-oaiMeta: - name: Modify thread - group: threads - beta: true - returns: 'The modified [thread](/docs/api-reference/threads/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmy_updated_thread = client.beta.threads.update(\n \"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n)\nprint(my_updated_thread)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const updatedThread = await openai.beta.threads.update(\n \"thread_abc123\",\n {\n metadata: { modified: \"true\", user: \"abc123\" },\n }\n );\n\n console.log(updatedThread);\n}\n\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1699014083,\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n },\n \"tool_resources\": {}\n}\n" - delete: - tags: - - Assistants - summary: Delete a thread. - operationId: deleteThread - parameters: - - name: thread_id - in: path - description: The ID of the thread to delete. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteThreadResponse' - x-oaiMeta: - name: Delete thread - group: threads - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nresponse = client.beta.threads.delete(\"thread_abc123\")\nprint(response)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const response = await openai.beta.threads.del(\"thread_abc123\");\n\n console.log(response);\n}\nmain();" - response: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread.deleted\",\n \"deleted\": true\n}\n" - '/threads/{thread_id}/messages': - get: - tags: - - Assistants - summary: Returns a list of messages for a given thread. - operationId: listMessages - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) the messages belong to.' - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: run_id - in: query - description: "Filter messages by the run ID that generated them.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListMessagesResponse' - x-oaiMeta: - name: List messages - group: threads - beta: true - returns: 'A list of [message](/docs/api-reference/messages) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_messages = client.beta.threads.messages.list(\"thread_abc123\")\nprint(thread_messages.data)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.list(\n \"thread_abc123\"\n );\n\n console.log(threadMessages.data);\n}\n\nmain();" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n },\n {\n \"id\": \"msg_abc456\",\n \"object\": \"thread.message\",\n \"created_at\": 1699016383,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hello, what is AI?\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n }\n ],\n \"first_id\": \"msg_abc123\",\n \"last_id\": \"msg_abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Assistants - summary: Create a message. - operationId: createMessage - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to create a message for.' - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateMessageRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - x-oaiMeta: - name: Create message - group: threads - beta: true - returns: 'A [message](/docs/api-reference/messages/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"role\": \"user\",\n \"content\": \"How does AI work? Explain it in simple terms.\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nthread_message = client.beta.threads.messages.create(\n \"thread_abc123\",\n role=\"user\",\n content=\"How does AI work? 
Explain it in simple terms.\",\n)\nprint(thread_message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const threadMessages = await openai.beta.threads.messages.create(\n \"thread_abc123\",\n { role: \"user\", content: \"How does AI work? Explain it in simple terms.\" }\n );\n\n console.log(threadMessages);\n}\n\nmain();" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1713226573,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" - '/threads/{thread_id}/messages/{message_id}': - get: - tags: - - Assistants - summary: Retrieve a message. - operationId: getMessage - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to which this message belongs.' - required: true - schema: - type: string - - name: message_id - in: path - description: The ID of the message to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - x-oaiMeta: - name: Retrieve message - group: threads - beta: true - returns: 'The [message](/docs/api-reference/messages/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.retrieve(\n message_id=\"msg_abc123\",\n thread_id=\"thread_abc123\",\n)\nprint(message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.retrieve(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(message);\n}\n\nmain();" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"attachments\": [],\n \"metadata\": {}\n}\n" - post: - tags: - - Assistants - summary: Modifies a message. - operationId: modifyMessage - parameters: - - name: thread_id - in: path - description: The ID of the thread to which this message belongs. - required: true - schema: - type: string - - name: message_id - in: path - description: The ID of the message to modify. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyMessageRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/MessageObject' - x-oaiMeta: - name: Modify message - group: threads - beta: true - returns: 'The modified [message](/docs/api-reference/messages/object) object.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nmessage = client.beta.threads.messages.update(\n message_id=\"msg_abc12\",\n thread_id=\"thread_abc123\",\n metadata={\n \"modified\": \"true\",\n \"user\": \"abc123\",\n },\n)\nprint(message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const message = await openai.beta.threads.messages.update(\n \"thread_abc123\",\n \"msg_abc123\",\n {\n metadata: {\n modified: \"true\",\n user: \"abc123\",\n },\n }\n }'" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1699017614,\n \"assistant_id\": null,\n \"thread_id\": \"thread_abc123\",\n \"run_id\": null,\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"How does AI work? Explain it in simple terms.\",\n \"annotations\": []\n }\n }\n ],\n \"file_ids\": [],\n \"metadata\": {\n \"modified\": \"true\",\n \"user\": \"abc123\"\n }\n}\n" - delete: - tags: - - Assistants - summary: Deletes a message. - operationId: deleteMessage - parameters: - - name: thread_id - in: path - description: The ID of the thread to which this message belongs. - required: true - schema: - type: string - - name: message_id - in: path - description: The ID of the message to delete. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteMessageResponse' - x-oaiMeta: - name: Delete message - group: threads - beta: true - returns: Deletion status - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \\\n -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_message = client.beta.threads.messages.delete(\n message_id=\"msg_abc12\",\n thread_id=\"thread_abc123\",\n)\nprint(deleted_message)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedMessage = await openai.beta.threads.messages.del(\n \"thread_abc123\",\n \"msg_abc123\"\n );\n\n console.log(deletedMessage);\n}" - response: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message.deleted\",\n \"deleted\": true\n}\n" - /threads/runs: - post: - tags: - - Assistants - summary: Create a thread and run it in one request. - operationId: createThreadAndRun - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateThreadAndRunRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Create thread and run - group: threads - beta: true - returns: 'A [run](/docs/api-reference/runs/object) object.' 
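The Assistants, Threads, Messages, and Runs endpoints in this section are normally used together: create an assistant once, then start a thread-and-run per conversation and read the reply from the thread's messages. Below is a minimal, non-streaming sketch assembled from the calls shown here, plus `client.beta.threads.runs.retrieve`, which is assumed to exist in the SDK for polling the run object documented below; the prompt, poll interval, and status handling are illustrative.

import time
from openai import OpenAI

client = OpenAI()

# One-time setup: a Code Interpreter assistant, as in the create-assistant example above.
assistant = client.beta.assistants.create(
    name="Math Tutor",
    instructions="You are a personal math tutor. Write and run Python code to answer questions.",
    tools=[{"type": "code_interpreter"}],
    model="gpt-4o",
)

# Start a thread and a run in a single request.
run = client.beta.threads.create_and_run(
    assistant_id=assistant.id,
    thread={"messages": [{"role": "user", "content": "What is 3 to the 7th power plus 11?"}]},
)

# Poll until the run is no longer queued or in progress (statuses as in the run object below).
while run.status in ("queued", "in_progress"):
    time.sleep(1)
    run = client.beta.threads.runs.retrieve(run.id, thread_id=run.thread_id)  # assumed SDK helper

# Messages are listed newest-first by default, so the assistant's reply comes back first.
messages = client.beta.threads.messages.list(run.thread_id)
print(messages.data[0].content[0].text.value)

For incremental output, the same `create_and_run` call accepts `stream=True`, as in the Streaming example that follows, and then yields the `thread.*` events listed there instead of a run object to poll.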
- examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.create_and_run(\n assistant_id=\"asst_abc123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Explain deep learning to a 5 year old.\"}\n ]\n }\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_abc123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Explain deep learning to a 5 year old.\" },\n ],\n },\n });\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076792,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": null,\n \"expires_at\": 1699077392,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"required_action\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant.\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_completion_tokens\": null,\n \"max_prompt_tokens\": null,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"incomplete_details\": null,\n \"usage\": null,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.create_and_run(\n assistant_id=\"asst_123\",\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello\"}\n ]\n },\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"Hello\" },\n ],\n },\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710348075,\"metadata\":{}}\n\nevent: thread.run.created\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"tool_resources\":{},\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[], \"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! How can I assist you today?\",\"annotations\":[]}}], \"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\n{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1713226836,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1713226837,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}\n\nevent: done\ndata: [DONE]\n" - - title: Streaming with Functions - request: - curl: "curl https://api.openai.com/v1/threads/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"thread\": {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.create_and_run(\n thread={\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather like in San Francisco?\"}\n ]\n },\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.createAndRun({\n assistant_id: \"asst_123\",\n thread: {\n messages: [\n { role: \"user\", content: \"What is the weather like in San Francisco?\" },\n ],\n },\n tools: tools,\n stream: true\n });\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.created\ndata: {\"id\":\"thread_123\",\"object\":\"thread\",\"created_at\":1710351818,\"metadata\":{}}\n\nevent: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710351819,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710352418,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[]},\"usage\":null}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"\",\"output\":null}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"{\\\"\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"location\"}}]}}}\n\n...\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"ahrenheit\"}}]}}}\n\nevent: thread.run.step.delta\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step.delta\",\"delta\":{\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"index\":0,\"type\":\"function\",\"function\":{\"arguments\":\"\\\"}\"}}]}}}\n\nevent: thread.run.requires_action\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710351818,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"requires_action\",\"started_at\":1710351818,\"expires_at\":1710352418,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":{\"type\":\"submit_tool_outputs\",\"submit_tool_outputs\":{\"tool_calls\":[{\"id\":\"call_XXNp8YGaFrjrSjgqxtC8JJ1B\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\"}}]}},\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given 
location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":345,\"completion_tokens\":11,\"total_tokens\":356},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs': - get: - tags: - - Assistants - summary: Returns a list of runs belonging to a thread. - operationId: listRuns - parameters: - - name: thread_id - in: path - description: The ID of the thread the run belongs to. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListRunsResponse' - x-oaiMeta: - name: List runs - group: threads - beta: true - returns: 'A list of [run](/docs/api-reference/runs/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nruns = client.beta.threads.runs.list(\n \"thread_abc123\"\n)\n\nprint(runs)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const runs = await openai.beta.threads.runs.list(\n \"thread_abc123\"\n );\n\n console.log(runs);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n },\n {\n \"id\": \"run_abc456\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n }\n ],\n \"first_id\": \"run_abc123\",\n \"last_id\": \"run_abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Assistants - summary: Create a run. - operationId: createRun - parameters: - - name: thread_id - in: path - description: The ID of the thread to run. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateRunRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Create run - group: threads - beta: true - returns: 'A [run](/docs/api-reference/runs/object) object.' 
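# A minimal sketch of consuming the event stream returned when a run is created with `stream=true`
# (see the Streaming example below), printing only text deltas; the attribute shapes mirror the
# event payloads shown in the examples and the IDs are placeholders.
#
#   from openai import OpenAI
#
#   client = OpenAI()
#   stream = client.beta.threads.runs.create(
#       thread_id="thread_abc123",                        # placeholder IDs
#       assistant_id="asst_abc123",
#       stream=True,
#   )
#   for event in stream:
#       if event.event == "thread.message.delta":
#           for block in event.data.delta.content or []:
#               if block.type == "text" and block.text and block.text.value:
#                   print(block.text.value, end="", flush=True)
#       elif event.event == "thread.run.completed":
#           print()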
- examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n { assistant_id: \"asst_abc123\" }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699063290,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"queued\",\n \"started_at\": 1699063290,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699063291,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_123\",\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_123\",\n assistant_id=\"asst_123\",\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_123\",\n { assistant_id: \"asst_123\", stream: true }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: 
{\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710330641,\"expires_at\":1710331240,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: 
{\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710330641,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710330642,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710330641,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710330642,\"expires_at\":1710331240,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710330640,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710330641,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710330642,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - - title: Streaming with Functions - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"assistant_id\": \"asst_abc123\",\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ntools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. 
San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n]\n\nstream = client.beta.threads.runs.create(\n thread_id=\"thread_abc123\",\n assistant_id=\"asst_abc123\",\n tools=tools,\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nconst tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n },\n \"unit\": {\"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"]},\n },\n \"required\": [\"location\"],\n },\n }\n }\n];\n\nasync function main() {\n const stream = await openai.beta.threads.runs.create(\n \"thread_abc123\",\n {\n assistant_id: \"asst_abc123\",\n tools: tools,\n stream: true\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.created\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":null,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710348075,\"expires_at\":1710348675,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: 
{\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"Hello\",\"annotations\":[]}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" today\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"?\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_001\",\"object\":\"thread.message\",\"created_at\":1710348076,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710348077,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"Hello! 
How can I assist you today?\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710348076,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710348077,\"expires_at\":1710348675,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_001\"}},\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710348075,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710348075,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710348077,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs/{run_id}': - get: - tags: - - Assistants - summary: Retrieves a run. - operationId: getRun - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Retrieve run - group: threads - beta: true - returns: 'The [run](/docs/api-reference/runs/object) object matching the specified ID.' 
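# A run created without streaming stays `queued`/`in_progress` for a while, so a common pattern is to
# poll this retrieve endpoint until the status settles. A minimal sketch with placeholder IDs and a
# fixed one-second poll interval:
#
#   import time
#   from openai import OpenAI
#
#   client = OpenAI()
#   run = client.beta.threads.runs.retrieve(
#       thread_id="thread_abc123",                        # placeholder IDs
#       run_id="run_abc123",
#   )
#   while run.status in ("queued", "in_progress", "cancelling"):
#       time.sleep(1)
#       run = client.beta.threads.runs.retrieve(
#           thread_id="thread_abc123",
#           run_id="run_abc123",
#       )
#   print(run.status)                                     # e.g. completed, requires_action, failed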
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.retrieve(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - post: - tags: - - Assistants - summary: Modifies a run. - operationId: modifyRun - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to modify. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ModifyRunRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Modify run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n }\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.update(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n metadata={\"user_id\": \"user_abc123\"},\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.update(\n \"thread_abc123\",\n \"run_abc123\",\n {\n metadata: {\n user_id: \"user_abc123\",\n },\n }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075072,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699075072,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699075073,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"incomplete_details\": null,\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"tool_resources\": {\n \"code_interpreter\": {\n \"file_ids\": [\n \"file-abc123\",\n \"file-abc456\"\n ]\n }\n },\n \"metadata\": {\n \"user_id\": \"user_abc123\"\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - '/threads/{thread_id}/runs/{run_id}/submit_tool_outputs': - post: - tags: - - Assistants - summary: "When a run has the `status: \"requires_action\"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request.\n" - operationId: submitToolOuputsToRun - parameters: - - name: thread_id - in: path - description: 'The ID of the [thread](/docs/api-reference/threads) to which this run belongs.' - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run that requires the tool output submission. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SubmitToolOutputsRunRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Submit tool outputs to run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
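# A minimal sketch of the `requires_action` flow described above: read the pending tool calls from the
# run, produce one output per `tool_call_id`, and submit them all in a single request. IDs are
# placeholders and `lookup_weather` stands in for your own tool implementation.
#
#   import json
#   from openai import OpenAI
#
#   client = OpenAI()
#
#   def lookup_weather(location: str) -> str:             # stand-in for a real tool
#       return "70 degrees and sunny."
#
#   run = client.beta.threads.runs.retrieve(thread_id="thread_123", run_id="run_123")
#   if run.status == "requires_action" and run.required_action.type == "submit_tool_outputs":
#       outputs = []
#       for call in run.required_action.submit_tool_outputs.tool_calls:
#           args = json.loads(call.function.arguments)
#           outputs.append({"tool_call_id": call.id, "output": lookup_weather(args["location"])})
#       run = client.beta.threads.runs.submit_tool_outputs(
#           thread_id="thread_123",
#           run_id="run_123",
#           tool_outputs=outputs,                         # all outputs go in one request
#       )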
- examples: - - title: Default - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ]\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699075592,\n \"assistant_id\": \"asst_123\",\n \"thread_id\": \"thread_123\",\n \"status\": \"queued\",\n \"started_at\": 1699075592,\n \"expires_at\": 1699076192,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather in a given location\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state, e.g. San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"]\n }\n },\n \"required\": [\"location\"]\n }\n }\n }\n ],\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - - title: Streaming - request: - curl: "curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"tool_outputs\": [\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n \"stream\": true\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nstream = client.beta.threads.runs.submit_tool_outputs(\n thread_id=\"thread_123\",\n run_id=\"run_123\",\n tool_outputs=[\n {\n \"tool_call_id\": \"call_001\",\n \"output\": \"70 degrees and sunny.\"\n }\n ],\n stream=True\n)\n\nfor event in stream:\n print(event)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const stream = await openai.beta.threads.runs.submitToolOutputs(\n \"thread_123\",\n \"run_123\",\n {\n tool_outputs: [\n {\n tool_call_id: \"call_001\",\n output: \"70 degrees and sunny.\",\n },\n ],\n }\n );\n\n for await (const event of stream) {\n console.log(event);\n }\n}\n\nmain();\n" - response: "event: thread.run.step.completed\ndata: 
{\"id\":\"step_001\",\"object\":\"thread.run.step\",\"created_at\":1710352449,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"tool_calls\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352475,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"tool_calls\",\"tool_calls\":[{\"id\":\"call_iWr0kQ2EaYMaxNdl0v3KYkx7\",\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"arguments\":\"{\\\"location\\\":\\\"San Francisco, CA\\\",\\\"unit\\\":\\\"fahrenheit\\\"}\",\"output\":\"70 degrees and sunny.\"}}]},\"usage\":{\"prompt_tokens\":291,\"completion_tokens\":24,\"total_tokens\":315}}\n\nevent: thread.run.queued\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"queued\",\"started_at\":1710352448,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.in_progress\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"in_progress\",\"started_at\":1710352475,\"expires_at\":1710353047,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":null,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. 
San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":null,\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: thread.run.step.created\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.run.step.in_progress\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"in_progress\",\"cancelled_at\":null,\"completed_at\":null,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":null}\n\nevent: thread.message.created\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.in_progress\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"in_progress\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":null,\"role\":\"assistant\",\"content\":[],\"metadata\":{}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\"The\",\"annotations\":[]}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" current\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" weather\"}}]}}\n\n...\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\" sunny\"}}]}}\n\nevent: thread.message.delta\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message.delta\",\"delta\":{\"content\":[{\"index\":0,\"type\":\"text\",\"text\":{\"value\":\".\"}}]}}\n\nevent: thread.message.completed\ndata: {\"id\":\"msg_002\",\"object\":\"thread.message\",\"created_at\":1710352476,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"run_id\":\"run_123\",\"status\":\"completed\",\"incomplete_details\":null,\"incomplete_at\":null,\"completed_at\":1710352477,\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":{\"value\":\"The current weather in San Francisco, CA is 70 degrees Fahrenheit and 
sunny.\",\"annotations\":[]}}],\"metadata\":{}}\n\nevent: thread.run.step.completed\ndata: {\"id\":\"step_002\",\"object\":\"thread.run.step\",\"created_at\":1710352476,\"run_id\":\"run_123\",\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"type\":\"message_creation\",\"status\":\"completed\",\"cancelled_at\":null,\"completed_at\":1710352477,\"expires_at\":1710353047,\"failed_at\":null,\"last_error\":null,\"step_details\":{\"type\":\"message_creation\",\"message_creation\":{\"message_id\":\"msg_002\"}},\"usage\":{\"prompt_tokens\":329,\"completion_tokens\":18,\"total_tokens\":347}}\n\nevent: thread.run.completed\ndata: {\"id\":\"run_123\",\"object\":\"thread.run\",\"created_at\":1710352447,\"assistant_id\":\"asst_123\",\"thread_id\":\"thread_123\",\"status\":\"completed\",\"started_at\":1710352475,\"expires_at\":null,\"cancelled_at\":null,\"failed_at\":null,\"completed_at\":1710352477,\"required_action\":null,\"last_error\":null,\"model\":\"gpt-4o\",\"instructions\":null,\"tools\":[{\"type\":\"function\",\"function\":{\"name\":\"get_current_weather\",\"description\":\"Get the current weather in a given location\",\"parameters\":{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\",\"description\":\"The city and state, e.g. San Francisco, CA\"},\"unit\":{\"type\":\"string\",\"enum\":[\"celsius\",\"fahrenheit\"]}},\"required\":[\"location\"]}}}],\"metadata\":{},\"temperature\":1.0,\"top_p\":1.0,\"max_completion_tokens\":null,\"max_prompt_tokens\":null,\"truncation_strategy\":{\"type\":\"auto\",\"last_messages\":null},\"incomplete_details\":null,\"usage\":{\"prompt_tokens\":20,\"completion_tokens\":11,\"total_tokens\":31},\"response_format\":\"auto\",\"tool_choice\":\"auto\",\"parallel_tool_calls\":true}}\n\nevent: done\ndata: [DONE]\n" - '/threads/{thread_id}/runs/{run_id}/cancel': - post: - tags: - - Assistants - summary: Cancels a run that is `in_progress`. - operationId: cancelRun - parameters: - - name: thread_id - in: path - description: The ID of the thread to which this run belongs. - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to cancel. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunObject' - x-oaiMeta: - name: Cancel a run - group: threads - beta: true - returns: 'The modified [run](/docs/api-reference/runs/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun = client.beta.threads.runs.cancel(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run)\n" - node.js: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const run = await openai.beta.threads.runs.cancel(\n \"thread_abc123\",\n \"run_abc123\"\n );\n\n console.log(run);\n}\n\nmain();\n" - response: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1699076126,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"cancelling\",\n \"started_at\": 1699076126,\n \"expires_at\": 1699076726,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": null,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You summarize books.\",\n \"tools\": [\n {\n \"type\": \"file_search\"\n }\n ],\n \"tool_resources\": {\n \"file_search\": {\n \"vector_store_ids\": [\"vs_123\"]\n }\n },\n \"metadata\": {},\n \"usage\": null,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - '/threads/{thread_id}/runs/{run_id}/steps': - get: - tags: - - Assistants - summary: Returns a list of run steps belonging to a run. - operationId: listRunSteps - parameters: - - name: thread_id - in: path - description: The ID of the thread the run and run steps belong to. - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run the run steps belong to. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListRunStepsResponse' - x-oaiMeta: - name: List run steps - group: threads - beta: true - returns: 'A list of [run step](/docs/api-reference/runs/step-object) objects.' 
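# A minimal sketch of listing run steps and branching on `step_details.type` (message creation vs.
# tool calls), matching the step objects shown in the example response below; IDs are placeholders.
#
#   from openai import OpenAI
#
#   client = OpenAI()
#   steps = client.beta.threads.runs.steps.list(
#       thread_id="thread_abc123",                        # placeholder IDs
#       run_id="run_abc123",
#   )
#   for step in steps.data:
#       details = step.step_details
#       if details.type == "message_creation":
#           print(step.id, "created message", details.message_creation.message_id)
#       elif details.type == "tool_calls":
#           print(step.id, "made", len(details.tool_calls), "tool call(s)")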
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_steps = client.beta.threads.runs.steps.list(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\"\n)\n\nprint(run_steps)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.list(\n \"thread_abc123\",\n \"run_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n }\n ],\n \"first_id\": \"step_abc123\",\n \"last_id\": \"step_abc456\",\n \"has_more\": false\n}\n" - '/threads/{thread_id}/runs/{run_id}/steps/{step_id}': - get: - tags: - - Assistants - summary: Retrieves a run step. - operationId: getRunStep - parameters: - - name: thread_id - in: path - description: The ID of the thread to which the run and run step belongs. - required: true - schema: - type: string - - name: run_id - in: path - description: The ID of the run to which the run step belongs. - required: true - schema: - type: string - - name: step_id - in: path - description: The ID of the run step to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/RunStepObject' - x-oaiMeta: - name: Retrieve run step - group: threads - beta: true - returns: 'The [run step](/docs/api-reference/runs/step-object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nrun_step = client.beta.threads.runs.steps.retrieve(\n thread_id=\"thread_abc123\",\n run_id=\"run_abc123\",\n step_id=\"step_abc123\"\n)\n\nprint(run_step)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const runStep = await openai.beta.threads.runs.steps.retrieve(\n \"thread_abc123\",\n \"run_abc123\",\n \"step_abc123\"\n );\n console.log(runStep);\n}\n\nmain();\n" - response: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" - /vector_stores: - get: - tags: - - Vector Stores - summary: Returns a list of vector stores. - operationId: listVectorStores - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListVectorStoresResponse' - x-oaiMeta: - name: List vector stores - group: vector_stores - beta: true - returns: 'A list of [vector store](/docs/api-reference/vector-stores/object) objects.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_stores = client.beta.vector_stores.list()\nprint(vector_stores)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStores = await openai.beta.vectorStores.list();\n console.log(vectorStores);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n },\n {\n \"id\": \"vs_abc456\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ v2\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n }\n ],\n \"first_id\": \"vs_abc123\",\n \"last_id\": \"vs_abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Vector Stores - summary: Create a vector store. - operationId: createVectorStore - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreObject' - x-oaiMeta: - name: Create vector store - group: vector_stores - beta: true - returns: 'A [vector store](/docs/api-reference/vector-stores/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.create(\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.create({\n name: \"Support FAQ\"\n });\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" - '/vector_stores/{vector_store_id}': - get: - tags: - - Vector Stores - summary: Retrieves a vector store. - operationId: getVectorStore - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to retrieve. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreObject' - x-oaiMeta: - name: Retrieve vector store - group: vector_stores - beta: true - returns: 'The [vector store](/docs/api-reference/vector-stores/object) object matching the specified ID.' 
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.retrieve(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.retrieve(\n \"vs_abc123\"\n );\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776\n}\n" - post: - tags: - - Vector Stores - summary: Modifies a vector store. - operationId: modifyVectorStore - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to modify. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/UpdateVectorStoreRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreObject' - x-oaiMeta: - name: Modify vector store - group: vector_stores - beta: true - returns: 'The modified [vector store](/docs/api-reference/vector-stores/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n -d '{\n \"name\": \"Support FAQ\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store = client.beta.vector_stores.update(\n vector_store_id=\"vs_abc123\",\n name=\"Support FAQ\"\n)\nprint(vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStore = await openai.beta.vectorStores.update(\n \"vs_abc123\",\n {\n name: \"Support FAQ\"\n }\n );\n console.log(vectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store\",\n \"created_at\": 1699061776,\n \"name\": \"Support FAQ\",\n \"bytes\": 139920,\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 3\n }\n}\n" - delete: - tags: - - Vector Stores - summary: Delete a vector store. - operationId: deleteVectorStore - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to delete. 
- required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteVectorStoreResponse' - x-oaiMeta: - name: Delete vector store - group: vector_stores - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store = client.beta.vector_stores.delete(\n vector_store_id=\"vs_abc123\"\n)\nprint(deleted_vector_store)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStore = await openai.beta.vectorStores.del(\n \"vs_abc123\"\n );\n console.log(deletedVectorStore);\n}\n\nmain();\n" - response: "{\n \"id\": \"vs_abc123\",\n \"object\": \"vector_store.deleted\",\n \"deleted\": true\n}\n" - '/vector_stores/{vector_store_id}/files': - get: - tags: - Vector Stores - summary: Returns a list of vector store files. - operationId: listVectorStoreFiles - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the files belong to. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - asc - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: filter - in: query - description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' - schema: - enum: - in_progress - completed - failed - cancelled - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListVectorStoreFilesResponse' - x-oaiMeta: - name: List vector store files - group: vector_stores - beta: true - returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.'
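The `filter` query parameter above is useful for surfacing ingestion problems. A sketch with the openai-python client that lists only failed files and prints their `last_error` (the vector store ID is a placeholder):

from openai import OpenAI

client = OpenAI()

failed = client.beta.vector_stores.files.list(
    vector_store_id="vs_abc123",
    filter="failed",
)
for f in failed.data:
    print(f.id, f.last_error)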
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.files.list(\n vector_store_id=\"vs_abc123\"\n)\nprint(vector_store_files)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.files.list(\n \"vs_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" - post: - tags: - - Vector Stores - summary: 'Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).' - operationId: createVectorStoreFile - parameters: - - name: vector_store_id - in: path - description: "The ID of the vector store for which to create a File.\n" - required: true - schema: - type: string - example: vs_abc123 - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreFileRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileObject' - x-oaiMeta: - name: Create vector store file - group: vector_stores - beta: true - returns: 'A [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_id\": \"file-abc123\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.create(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFile = await openai.beta.vectorStores.files.create(\n \"vs_abc123\",\n {\n file_id: \"file-abc123\"\n }\n );\n console.log(myVectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"usage_bytes\": 1234,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" - '/vector_stores/{vector_store_id}/files/{file_id}': - get: - tags: - - Vector Stores - summary: Retrieves a vector store file. - operationId: getVectorStoreFile - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file belongs to. - required: true - schema: - type: string - example: vs_abc123 - - name: file_id - in: path - description: The ID of the file being retrieved. 
- required: true - schema: - type: string - example: file-abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileObject' - x-oaiMeta: - name: Retrieve vector store file - group: vector_stores - beta: true - returns: 'The [vector store file](/docs/api-reference/vector-stores-files/file-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file = client.beta.vector_stores.files.retrieve(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFile = await openai.beta.vectorStores.files.retrieve(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(vectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abcd\",\n \"status\": \"completed\",\n \"last_error\": null\n}\n" - delete: - tags: - Vector Stores - summary: 'Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.' - operationId: deleteVectorStoreFile - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file belongs to. - required: true - schema: - type: string - - name: file_id - in: path - description: The ID of the file to delete. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteVectorStoreFileResponse' - x-oaiMeta: - name: Delete vector store file - group: vector_stores - beta: true - returns: Deletion status - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X DELETE\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file = client.beta.vector_stores.files.delete(\n vector_store_id=\"vs_abc123\",\n file_id=\"file-abc123\"\n)\nprint(deleted_vector_store_file)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFile = await openai.beta.vectorStores.files.del(\n \"vs_abc123\",\n \"file-abc123\"\n );\n console.log(deletedVectorStoreFile);\n}\n\nmain();\n" - response: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file.deleted\",\n \"deleted\": true\n}\n" - '/vector_stores/{vector_store_id}/file_batches': - post: - tags: - Vector Stores - summary: Create a vector store file batch.
- operationId: createVectorStoreFileBatch - parameters: - - name: vector_store_id - in: path - description: "The ID of the vector store for which to create a File Batch.\n" - required: true - schema: - type: string - example: vs_abc123 - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateVectorStoreFileBatchRequest' - required: true - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' - x-oaiMeta: - name: Create vector store file batch - group: vector_stores - beta: true - returns: 'A [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -d '{\n \"file_ids\": [\"file-abc123\", \"file-abc456\"]\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.create(\n vector_store_id=\"vs_abc123\",\n file_ids=[\"file-abc123\", \"file-abc456\"]\n)\nprint(vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(\n \"vs_abc123\",\n {\n file_ids: [\"file-abc123\", \"file-abc456\"]\n }\n );\n console.log(myVectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 2\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}': - get: - tags: - Vector Stores - summary: Retrieves a vector store file batch. - operationId: getVectorStoreFileBatch - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file batch belongs to. - required: true - schema: - type: string - example: vs_abc123 - - name: batch_id - in: path - description: The ID of the file batch being retrieved. - required: true - schema: - type: string - example: vsfb_abc123 - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' - x-oaiMeta: - name: Retrieve vector store file batch - group: vector_stores - beta: true - returns: 'The [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.'
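A file batch starts out `in_progress` and is retrieved by ID until it settles, as the create and retrieve examples around here show. A sketch with the openai-python client (the vector store and file IDs are placeholders):

import time
from openai import OpenAI

client = OpenAI()

batch = client.beta.vector_stores.file_batches.create(
    vector_store_id="vs_abc123",
    file_ids=["file-abc123", "file-abc456"],
)
while batch.status == "in_progress":
    time.sleep(2)
    batch = client.beta.vector_stores.file_batches.retrieve(
        vector_store_id="vs_abc123",
        batch_id=batch.id,
    )
print(batch.status, batch.file_counts)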
- examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_file_batch = client.beta.vector_stores.file_batches.retrieve(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"in_progress\",\n \"file_counts\": {\n \"in_progress\": 1,\n \"completed\": 1,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 0,\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel': - post: - tags: - - Vector Stores - summary: Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. - operationId: cancelVectorStoreFileBatch - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the file batch belongs to. - required: true - schema: - type: string - - name: batch_id - in: path - description: The ID of the file batch to cancel. - required: true - schema: - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' - x-oaiMeta: - name: Cancel vector store file batch - group: vector_stores - beta: true - returns: The modified vector store file batch object. - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\ndeleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel(\n vector_store_id=\"vs_abc123\",\n file_batch_id=\"vsfb_abc123\"\n)\nprint(deleted_vector_store_file_batch)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(deletedVectorStoreFileBatch);\n}\n\nmain();\n" - response: "{\n \"id\": \"vsfb_abc123\",\n \"object\": \"vector_store.file_batch\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"cancelling\",\n \"file_counts\": {\n \"in_progress\": 12,\n \"completed\": 3,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 15,\n }\n}\n" - '/vector_stores/{vector_store_id}/file_batches/{batch_id}/files': - get: - tags: - - Vector Stores - summary: Returns a list of vector store files in a batch. - operationId: listFilesInVectorStoreBatch - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store that the files belong to. - required: true - schema: - type: string - - name: batch_id - in: path - description: The ID of the file batch that the files belong to. 
- required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: order - in: query - description: "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.\n" - schema: - enum: - - asc - - desc - type: string - default: desc - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - - name: filter - in: query - description: 'Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.' - schema: - enum: - - in_progress - - completed - - failed - - cancelled - type: string - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListVectorStoreFilesResponse' - x-oaiMeta: - name: List vector store files in a batch - group: vector_stores - beta: true - returns: 'A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/files \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -H \"OpenAI-Beta: assistants=v2\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nvector_store_files = client.beta.vector_stores.file_batches.list_files(\n vector_store_id=\"vs_abc123\",\n batch_id=\"vsfb_abc123\"\n)\nprint(vector_store_files)\n" - node.js: "import OpenAI from \"openai\";\nconst openai = new OpenAI();\n\nasync function main() {\n const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles(\n \"vs_abc123\",\n \"vsfb_abc123\"\n );\n console.log(vectorStoreFiles);\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n },\n {\n \"id\": \"file-abc456\",\n \"object\": \"vector_store.file\",\n \"created_at\": 1699061776,\n \"vector_store_id\": \"vs_abc123\"\n }\n ],\n \"first_id\": \"file-abc123\",\n \"last_id\": \"file-abc456\",\n \"has_more\": false\n}\n" - /batches: - post: - tags: - Batch - summary: Creates and executes a batch from an uploaded file of requests - operationId: createBatch - requestBody: - content: - application/json: - schema: - required: - - input_file_id - - endpoint - - completion_window - type: object - properties: - input_file_id: - type: string - description: "The ID of an uploaded file that contains requests for the new batch.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`.
The file can contain up to 50,000 requests, and can be up to 100 MB in size.\n" - endpoint: - enum: - - /v1/chat/completions - - /v1/embeddings - - /v1/completions - type: string - description: 'The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch.' - completion_window: - enum: - - 24h - type: string - description: The time frame within which the batch should be processed. Currently only `24h` is supported. - metadata: - type: object - additionalProperties: - type: string - description: Optional custom metadata for the batch. - nullable: true - required: true - responses: - '200': - description: Batch created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Batch' - x-oaiMeta: - name: Create batch - group: batch - returns: 'The created [Batch](/docs/api-reference/batch/object) object.' - examples: - request: - curl: "curl https://api.openai.com/v1/batches \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input_file_id\": \"file-abc123\",\n \"endpoint\": \"/v1/chat/completions\",\n \"completion_window\": \"24h\"\n }'\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.create(\n input_file_id=\"file-abc123\",\n endpoint=\"/v1/chat/completions\",\n completion_window=\"24h\"\n)\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.create({\n input_file_id: \"file-abc123\",\n endpoint: \"/v1/chat/completions\",\n completion_window: \"24h\"\n });\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"validating\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": null,\n \"expires_at\": null,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 0,\n \"completed\": 0,\n \"failed\": 0\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\"\n }\n}\n" - get: - tags: - Batch - summary: List your organization's batches. - operationId: listBatches - parameters: - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - responses: - '200': - description: Batch listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ListBatchesResponse' - x-oaiMeta: - name: List batch - group: batch - returns: 'A list of paginated [Batch](/docs/api-reference/batch/object) objects.'
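Putting the Batch endpoints together: upload a JSONL request file with purpose `batch`, create the batch, then poll until it reaches a terminal status. A sketch with the openai-python client; the file name and metadata are placeholders:

import time
from openai import OpenAI

client = OpenAI()

# Upload the JSONL file of requests with the required `batch` purpose.
batch_input = client.files.create(file=open("requests.jsonl", "rb"), purpose="batch")

batch = client.batches.create(
    input_file_id=batch_input.id,
    endpoint="/v1/chat/completions",
    completion_window="24h",
    metadata={"batch_description": "Nightly eval job"},
)

# Poll until the batch finishes, fails, expires, or is cancelled.
while batch.status not in ("completed", "failed", "expired", "cancelled"):
    time.sleep(60)
    batch = client.batches.retrieve(batch.id)
print(batch.status, batch.output_file_id, batch.error_file_id)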
- examples: - request: - curl: "curl https://api.openai.com/v1/batches?limit=2 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\"\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.list()\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const list = await openai.batches.list();\n\n for await (const batch of list) {\n console.log(batch);\n }\n}\n\nmain();\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly job\",\n }\n },\n { ... },\n ],\n \"first_id\": \"batch_abc123\",\n \"last_id\": \"batch_abc456\",\n \"has_more\": true\n}\n" - '/batches/{batch_id}': - get: - tags: - - Batch - summary: Retrieves a batch. - operationId: retrieveBatch - parameters: - - name: batch_id - in: path - description: The ID of the batch to retrieve. - required: true - schema: - type: string - responses: - '200': - description: Batch retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Batch' - x-oaiMeta: - name: Retrieve batch - group: batch - returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/batches/batch_abc123 \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.retrieve(\"batch_abc123\")\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.retrieve(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - '/batches/{batch_id}/cancel': - post: - tags: - - Batch - summary: 'Cancels an in-progress batch. The batch will be in status `cancelling` for up to 10 minutes, before changing to `cancelled`, where it will have partial results (if any) available in the output file.' 
- operationId: cancelBatch - parameters: - - name: batch_id - in: path - description: The ID of the batch to cancel. - required: true - schema: - type: string - responses: - '200': - description: Batch is cancelling. Returns the cancelling batch's details. - content: - application/json: - schema: - $ref: '#/components/schemas/Batch' - x-oaiMeta: - name: Cancel batch - group: batch - returns: 'The [Batch](/docs/api-reference/batch/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/batches/batch_abc123/cancel \\\n -H \"Authorization: Bearer $OPENAI_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -X POST\n" - python: "from openai import OpenAI\nclient = OpenAI()\n\nclient.batches.cancel(\"batch_abc123\")\n" - node: "import OpenAI from \"openai\";\n\nconst openai = new OpenAI();\n\nasync function main() {\n const batch = await openai.batches.cancel(\"batch_abc123\");\n\n console.log(batch);\n}\n\nmain();\n" - response: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/chat/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"cancelling\",\n \"output_file_id\": null,\n \"error_file_id\": null,\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": null,\n \"completed_at\": null,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": 1711475133,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 23,\n \"failed\": 1\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\"\n }\n}\n" - /organization/audit_logs: - get: - tags: - Audit Logs - summary: List user actions and configuration changes within this organization. - operationId: list-audit-logs - parameters: - - name: effective_at - in: query - description: Return only events whose `effective_at` (Unix seconds) is in this range. - schema: - type: object - properties: - gt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than this value. - gte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is greater than or equal to this value. - lt: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than this value. - lte: - type: integer - description: Return only events whose `effective_at` (Unix seconds) is less than or equal to this value. - - name: 'project_ids[]' - in: query - description: Return only events for these projects. - schema: - type: array - items: - type: string - - name: 'event_types[]' - in: query - description: 'Return only events with a `type` in one of these values. For example, `project.created`. For all options, see the documentation for the [audit log object](/docs/api-reference/audit-logs/object).' - schema: - type: array - items: - $ref: '#/components/schemas/AuditLogEventType' - - name: 'actor_ids[]' - in: query - description: 'Return only events performed by these actors. Can be a user ID, a service account ID, or an api key tracking ID.' - schema: - type: array - items: - type: string - - name: 'actor_emails[]' - in: query - description: Return only events performed by users with these emails. - schema: - type: array - items: - type: string - - name: 'resource_ids[]' - in: query - description: 'Return only events performed on these targets. For example, a project ID updated.'
- schema: - type: array - items: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: before - in: query - description: "A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.\n" - schema: - type: string - responses: - '200': - description: Audit logs listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ListAuditLogsResponse' - x-oaiMeta: - name: List audit logs - group: audit-logs - returns: 'A list of paginated [Audit Log](/docs/api-reference/audit-logs/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/audit_logs \\\n-H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n-H \"Content-Type: application/json\"\n" - response: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"audit_log-xxx_yyyymmdd\",\n \"type\": \"project.archived\",\n \"effective_at\": 1722461446,\n \"actor\": {\n \"type\": \"api_key\",\n \"api_key\": {\n \"type\": \"user\",\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n }\n }\n },\n \"project.archived\": {\n \"id\": \"proj_abc\"\n }\n },\n {\n \"id\": \"audit_log-yyy__20240101\",\n \"type\": \"api_key.updated\",\n \"effective_at\": 1720804190,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.updated\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource_2.operation_2\"]\n }\n }\n }\n ],\n \"first_id\": \"audit_log-xxx_yyyymmdd\",\n \"last_id\": \"audit_log-yyy__20240101\",\n \"has_more\": true\n}\n" - /organization/invites: - get: - tags: - Invites - summary: Returns a list of invites in the organization. - operationId: list-invites - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Invites listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/InviteListResponse' - x-oaiMeta: - name: List invites - group: administration - returns: 'A list of [Invite](/docs/api-reference/invite/object) objects.'
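The audit-log and invite endpoints above are documented with curl only; any HTTP client works as long as it sends an admin key. A sketch using the third-party `requests` library (an assumption, not part of the spec) with the documented `limit` and `event_types[]` filters:

import os
import requests

headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}

resp = requests.get(
    "https://api.openai.com/v1/organization/audit_logs",
    headers=headers,
    params={"limit": 20, "event_types[]": ["project.created", "api_key.updated"]},
)
resp.raise_for_status()
for event in resp.json()["data"]:
    print(event["type"], event["effective_at"])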
- examples: - request: - curl: "curl https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n }\n ],\n \"first_id\": \"invite-abc\",\n \"last_id\": \"invite-abc\",\n \"has_more\": false\n}\n" - post: - tags: - - Invites - summary: Create an invite for a user to the organization. The invite must be accepted by the user before they have access to the organization. - operationId: inviteUser - requestBody: - description: The invite request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/InviteRequest' - required: true - responses: - '200': - description: User invited successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Invite' - x-oaiMeta: - name: Create invite - group: administration - returns: 'The created [Invite](/docs/api-reference/invite/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/invites \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"email\": \"user@example.com\",\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": null\n}\n" - '/organization/invites/{invite_id}': - get: - tags: - - Invites - summary: Retrieves an invite. - operationId: retrieve-invite - parameters: - - name: invite_id - in: path - description: The ID of the invite to retrieve. - required: true - schema: - type: string - responses: - '200': - description: Invite retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Invite' - x-oaiMeta: - name: Retrieve invite - group: administration - returns: 'The [Invite](/docs/api-reference/invite/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" - delete: - tags: - - Invites - summary: 'Delete an invite. If the invite has already been accepted, it cannot be deleted.' - operationId: delete-invite - parameters: - - name: invite_id - in: path - description: The ID of the invite to delete. - required: true - schema: - type: string - responses: - '200': - description: Invite deleted successfully. 
- content: - application/json: - schema: - $ref: '#/components/schemas/InviteDeleteResponse' - x-oaiMeta: - name: Delete invite - group: administration - returns: Confirmation that the invite has been deleted - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/invites/invite-abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.invite.deleted\",\n \"id\": \"invite-abc\",\n \"deleted\": true\n} \n" - /organization/users: - get: - tags: - - Users - summary: Lists all of the users in the organization. - operationId: list-users - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Users listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/UserListResponse' - x-oaiMeta: - name: List users - group: administration - returns: 'A list of [User](/docs/api-reference/users/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" - '/organization/users/{user_id}': - get: - tags: - - Users - summary: Retrieves a user by their identifier. - operationId: retrieve-user - parameters: - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: User retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Retrieve user - group: administration - returns: 'The [User](/docs/api-reference/users/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - post: - tags: - - Users - summary: Modifies a user's role in the organization. - operationId: modify-user - requestBody: - description: The new user role to modify. This must be one of `owner` or `member`. - content: - application/json: - schema: - $ref: '#/components/schemas/UserRoleUpdateRequest' - required: true - responses: - '200': - description: User role updated successfully. 
- content: - application/json: - schema: - $ref: '#/components/schemas/User' - x-oaiMeta: - name: Modify user - group: administration - returns: 'The updated [User](/docs/api-reference/users/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - delete: - tags: - - Users - summary: Deletes a user from the organization. - operationId: delete-user - parameters: - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: User deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/UserDeleteResponse' - x-oaiMeta: - name: Delete user - group: administration - returns: Confirmation of the deleted user - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n} \n" - /organization/projects: - get: - tags: - - Projects - summary: Returns a list of projects. - operationId: list-projects - parameters: - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - - name: include_archived - in: query - description: If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. - schema: - type: boolean - default: false - responses: - '200': - description: Projects listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectListResponse' - x-oaiMeta: - name: List projects - group: administration - returns: 'A list of [Project](/docs/api-reference/projects/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n }\n ],\n \"first_id\": \"proj-abc\",\n \"last_id\": \"proj-xyz\",\n \"has_more\": false\n}\n" - post: - tags: - - Projects - summary: 'Create a new project in the organization. Projects can be created and archived, but cannot be deleted.' - operationId: create-project - requestBody: - description: The project create request payload. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ProjectCreateRequest' - required: true - responses: - '200': - description: Project created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Create project - group: administration - returns: 'The created [Project](/docs/api-reference/projects/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project ABC\"\n }'\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project ABC\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - '/organization/projects/{project_id}': - get: - tags: - - Projects - summary: Retrieves a project. - operationId: retrieve-project - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - responses: - '200': - description: Project retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Retrieve project - group: administration - description: Retrieve a project. - returns: 'The [Project](/docs/api-reference/projects/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project example\",\n \"created_at\": 1711471533,\n \"archived_at\": null,\n \"status\": \"active\"\n}\n" - post: - tags: - - Projects - summary: Modifies a project in the organization. - operationId: modify-project - requestBody: - description: The project update request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUpdateRequest' - required: true - responses: - '200': - description: Project updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - '400': - description: Error response when updating the default project. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Modify project - group: administration - returns: 'The updated [Project](/docs/api-reference/projects/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Project DEF\"\n }'\n" - '/organization/projects/{project_id}/archive': - post: - tags: - - Projects - summary: Archives a project in the organization. Archived projects cannot be used or updated. - operationId: archive-project - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - responses: - '200': - description: Project archived successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/Project' - x-oaiMeta: - name: Archive project - group: administration - returns: 'The archived [Project](/docs/api-reference/projects/object) object.' 
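Projects cannot be deleted, only archived, so a create-then-archive round trip exercises both endpoints above. A sketch using the third-party `requests` library (an assumption; the URLs and payloads mirror the curl examples):

import os
import requests

base = "https://api.openai.com/v1/organization/projects"
headers = {
    "Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}",
    "Content-Type": "application/json",
}

project = requests.post(base, headers=headers, json={"name": "Project ABC"}).json()
archived = requests.post(f"{base}/{project['id']}/archive", headers=headers).json()
print(archived["status"])  # expected to be "archived"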
- examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/archive \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"id\": \"proj_abc\",\n \"object\": \"organization.project\",\n \"name\": \"Project DEF\",\n \"created_at\": 1711471533,\n \"archived_at\": 1711471533,\n \"status\": \"archived\"\n}\n" - '/organization/projects/{project_id}/users': - get: - tags: - - Projects - summary: Returns a list of users in the project. - operationId: list-project-users - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Project users listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserListResponse' - '400': - description: Error response when project is archived. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: List project users - group: administration - returns: 'A list of [ProjectUser](/docs/api-reference/project-users/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n ],\n \"first_id\": \"user-abc\",\n \"last_id\": \"user-xyz\",\n \"has_more\": false\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - post: - tags: - - Projects - summary: Adds a user to the project. Users must already be members of the organization to be added to a project. - operationId: create-project-user - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - requestBody: - description: The project user create request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserCreateRequest' - required: true - responses: - '200': - description: User added to project successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - '400': - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Create project user - group: administration - returns: 'The created [ProjectUser](/docs/api-reference/project-users/object) object.' 
- examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"user_id\": \"user_abc\",\n \"role\": \"member\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - '/organization/projects/{project_id}/users/{user_id}': - get: - tags: - - Projects - summary: Retrieves a user in the project. - operationId: retrieve-project-user - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: Project user retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - x-oaiMeta: - name: Retrieve project user - group: administration - returns: 'The [ProjectUser](/docs/api-reference/project-users/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - post: - tags: - - Projects - summary: Modifies a user's role in the project. - operationId: modify-project-user - requestBody: - description: The project user update request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserUpdateRequest' - required: true - responses: - '200': - description: Project user's role updated successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUser' - '400': - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Modify project user - group: administration - returns: 'The updated [ProjectUser](/docs/api-reference/project-users/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"role\": \"owner\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - delete: - tags: - - Projects - summary: Deletes a user from the project. - operationId: delete-project-user - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: user_id - in: path - description: The ID of the user. - required: true - schema: - type: string - responses: - '200': - description: Project user deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectUserDeleteResponse' - '400': - description: Error response for various conditions. 
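A sketch of the project-user flow described above: add an existing organization member to a project, then change their role with a second POST. The `requests` package, the admin key variable, and the placeholder IDs are assumptions rather than part of the spec:

```python
# Sketch: add an organization member to a project, then promote them to owner.
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
project_id, user_id = "proj_abc", "user_abc"   # placeholder IDs

member = requests.post(f"{BASE}/organization/projects/{project_id}/users",
                       headers=HEADERS,
                       json={"user_id": user_id, "role": "member"}).json()

owner = requests.post(f"{BASE}/organization/projects/{project_id}/users/{user_id}",
                      headers=HEADERS, json={"role": "owner"}).json()
print(owner["role"])  # owner
```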
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Delete project user - group: administration - returns: 'Confirmation that project has been deleted or an error in case of an archived project, which has no users' - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.user.deleted\",\n \"id\": \"user_abc\",\n \"deleted\": true\n}\n" - '/organization/projects/{project_id}/service_accounts': - get: - tags: - - Projects - summary: Returns a list of service accounts in the project. - operationId: list-project-service-accounts - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Project service accounts listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountListResponse' - '400': - description: Error response when project is archived. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: List project service accounts - group: administration - returns: 'A list of [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n }\n ],\n \"first_id\": \"svc_acct_abc\",\n \"last_id\": \"svc_acct_xyz\",\n \"has_more\": false\n}\n" - post: - tags: - - Projects - summary: Creates a new service account in the project. This also returns an unredacted API key for the service account. - operationId: create-project-service-account - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - requestBody: - description: The project service account create request payload. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateRequest' - required: true - responses: - '200': - description: Project service account created successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountCreateResponse' - '400': - description: Error response when project is archived. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Create project service account - group: administration - returns: 'The created [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object.' - examples: - request: - curl: "curl -X POST https://api.openai.com/v1/organization/projects/proj_abc/service_accounts \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"name\": \"Production App\"\n }'\n" - response: - content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Production App\",\n \"role\": \"member\",\n \"created_at\": 1711471533,\n \"api_key\": {\n \"object\": \"organization.project.service_account.api_key\",\n \"value\": \"sk-abcdefghijklmnop123\",\n \"name\": \"Secret Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\"\n }\n}\n" - '/organization/projects/{project_id}/service_accounts/{service_account_id}': - get: - tags: - - Projects - summary: Retrieves a service account in the project. - operationId: retrieve-project-service-account - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: service_account_id - in: path - description: The ID of the service account. - required: true - schema: - type: string - responses: - '200': - description: Project service account retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccount' - x-oaiMeta: - name: Retrieve project service account - group: administration - returns: 'The [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" - delete: - tags: - - Projects - summary: Deletes a service account from the project. - operationId: delete-project-service-account - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: service_account_id - in: path - description: The ID of the service account. - required: true - schema: - type: string - responses: - '200': - description: Project service account deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectServiceAccountDeleteResponse' - x-oaiMeta: - name: Delete project service account - group: administration - returns: 'Confirmation of service account being deleted, or an error in case of an archived project, which has no service accounts' - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.service_account.deleted\",\n \"id\": \"svc_acct_abc\",\n \"deleted\": true\n}\n" - '/organization/projects/{project_id}/api_keys': - get: - tags: - - Projects - summary: Returns a list of API keys in the project. 
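Because the unredacted `api_key.value` is only returned by this create call, a client should capture it immediately. Something along these lines, assuming `requests` and `OPENAI_ADMIN_KEY`:

```python
# Sketch: create a project service account and keep its one-time API key.
import os
import requests

BASE = "https://api.openai.com/v1"
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}

svc = requests.post(f"{BASE}/organization/projects/proj_abc/service_accounts",
                    headers=HEADERS, json={"name": "Production App"}).json()

# The full key value appears only in this response; persist it in a secret store.
service_account_key = svc["api_key"]["value"]
print(svc["id"], svc["role"])
```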
- operationId: list-project-api-keys - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: limit - in: query - description: "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.\n" - schema: - type: integer - default: 20 - - name: after - in: query - description: "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.\n" - schema: - type: string - responses: - '200': - description: Project API keys listed successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKeyListResponse' - x-oaiMeta: - name: List project API keys - group: administration - returns: 'A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) objects.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n }\n ],\n \"first_id\": \"key_abc\",\n \"last_id\": \"key_xyz\",\n \"has_more\": false\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"Project {name} is archived\"\n}\n" - '/organization/projects/{project_id}/api_keys/{key_id}': - get: - tags: - - Projects - summary: Retrieves an API key in the project. - operationId: retrieve-project-api-key - parameters: - - name: project_id - in: path - description: The ID of the project. - required: true - schema: - type: string - - name: key_id - in: path - description: The ID of the API key. - required: true - schema: - type: string - responses: - '200': - description: Project API key retrieved successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKey' - x-oaiMeta: - name: Retrieve project API key - group: administration - returns: 'The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object matching the specified ID.' - examples: - request: - curl: "curl https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" - delete: - tags: - - Projects - summary: Deletes an API key from the project. - operationId: delete-project-api-key - parameters: - - name: project_id - in: path - description: The ID of the project. 
- required: true - schema: - type: string - - name: key_id - in: path - description: The ID of the API key. - required: true - schema: - type: string - responses: - '200': - description: Project API key deleted successfully. - content: - application/json: - schema: - $ref: '#/components/schemas/ProjectApiKeyDeleteResponse' - '400': - description: Error response for various conditions. - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - x-oaiMeta: - name: Delete project API key - group: administration - returns: Confirmation of the key's deletion or an error if the key belonged to a service account - examples: - request: - curl: "curl -X DELETE https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc \\\n -H \"Authorization: Bearer $OPENAI_ADMIN_KEY\" \\\n -H \"Content-Type: application/json\"\n" - response: - content: "{\n \"object\": \"organization.project.api_key.deleted\",\n \"id\": \"key_abc\",\n \"deleted\": true\n}\n" - error_response: - content: "{\n \"code\": 400,\n \"message\": \"API keys cannot be deleted for service accounts, please delete the service account\"\n} \n" -components: - schemas: - Error: - required: - - type - - message - - param - - code - type: object - properties: - code: - type: string - nullable: true - message: - type: string - param: - type: string - nullable: true - type: - type: string - ErrorResponse: - required: - - error - type: object - properties: - error: - $ref: '#/components/schemas/Error' - ListModelsResponse: - required: - - object - - data - type: object - properties: - object: - enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/Model' - DeleteModelResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - type: string - CreateCompletionRequest: - required: - - model - - prompt - type: object - properties: - model: - anyOf: - - type: string - - enum: - - gpt-3.5-turbo-instruct - - davinci-002 - - babbage-002 - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - x-oaiTypeLabel: string - prompt: - oneOf: - - type: string - default: '' - example: This is a test. - - type: array - items: - type: string - default: '' - example: This is a test. - - minItems: 1 - type: array - items: - type: integer - example: '[1212, 318, 257, 1332, 13]' - - minItems: 1 - type: array - items: - minItems: 1 - type: array - items: - type: integer - example: '[[1212, 318, 257, 1332, 13]]' - description: "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n" - default: <|endoftext|> - nullable: true - best_of: - maximum: 20 - minimum: 0 - type: integer - description: "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). 
Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" - default: 1 - nullable: true - echo: - type: boolean - description: "Echo back the prompt in addition to the completion\n" - default: false - nullable: true - frequency_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - logit_bias: - type: object - additionalProperties: - type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n" - default: - nullable: true - x-oaiTypeLabel: map - logprobs: - maximum: 5 - minimum: 0 - type: integer - description: "Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n" - default: - nullable: true - max_tokens: - minimum: 0 - type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - default: 16 - nullable: true - example: 16 - n: - maximum: 128 - minimum: 1 - type: integer - description: "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n" - default: 1 - nullable: true - example: 1 - presence_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - seed: - maximum: 9223372036854775807 - minimum: -9223372036854775808 - type: integer - description: "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" - nullable: true - stop: - oneOf: - - type: string - default: <|endoftext|> - nullable: true - example: "\n" - - maxItems: 4 - minItems: 1 - type: array - items: - type: string - example: '["\n"]' - description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.\n" - default: - nullable: true - stream: - type: boolean - description: "Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" - default: false - nullable: true - stream_options: - $ref: '#/components/schemas/ChatCompletionStreamOptions' - suffix: - type: string - description: "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n" - default: - nullable: true - example: test. - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" - default: 1 - nullable: true - example: 1 - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - CreateCompletionResponse: - required: - - id - - object - - created - - model - - choices - type: object - properties: - id: - type: string - description: A unique identifier for the completion. - choices: - type: array - items: - required: - - finish_reason - - index - - logprobs - - text - type: object - properties: - finish_reason: - enum: - - stop - - length - - content_filter - type: string - description: "The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\nor `content_filter` if content was omitted due to a flag from our content filters.\n" - index: - type: integer - logprobs: - type: object - properties: - text_offset: - type: array - items: - type: integer - token_logprobs: - type: array - items: - type: number - tokens: - type: array - items: - type: string - top_logprobs: - type: array - items: - type: object - additionalProperties: - type: number - nullable: true - text: - type: string - description: The list of completion choices the model generated for the input prompt. - created: - type: integer - description: The Unix timestamp (in seconds) of when the completion was created. - model: - type: string - description: The model used for completion. - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - text_completion - type: string - description: 'The object type, which is always "text_completion"' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: "Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).\n" - x-oaiMeta: - name: The completion object - legacy: true - example: "{\n \"id\": \"cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"gpt-4-turbo\",\n \"choices\": [\n {\n \"text\": \"\\n\\nThis is indeed a test\",\n \"index\": 0,\n \"logprobs\": null,\n \"finish_reason\": \"length\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 5,\n \"completion_tokens\": 7,\n \"total_tokens\": 12\n }\n}\n" - ChatCompletionRequestMessageContentPartText: - title: Text content part - required: - - type - - text - type: object - properties: - type: - enum: - - text - type: string - description: The type of the content part. - text: - type: string - description: The text content. - ChatCompletionRequestMessageContentPartImage: - title: Image content part - required: - - type - - image_url - type: object - properties: - type: - enum: - - image_url - type: string - description: The type of the content part. - image_url: - required: - - url - type: object - properties: - url: - type: string - description: Either a URL of the image or the base64 encoded image data. - format: uri - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).' - default: auto - ChatCompletionRequestMessageContentPartRefusal: - title: Refusal content part - required: - - type - - refusal - type: object - properties: - type: - enum: - - refusal - type: string - description: The type of the content part. - refusal: - type: string - description: The refusal message generated by the model. 
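As an illustration of the text and image content-part schemas above, a sketch using the official `openai` Python package; the image URL and the `detail` value are placeholders:

```python
# Sketch: a user message combining a `text` part and an `image_url` part
# with the optional `detail` level.
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url",
             "image_url": {"url": "https://example.com/photo.jpg",  # placeholder URL
                           "detail": "low"}},
        ],
    }],
)
print(completion.choices[0].message.content)
```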
- ChatCompletionRequestMessage: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - ChatCompletionRequestSystemMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - x-oaiExpandable: true - ChatCompletionRequestUserMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartImage' - x-oaiExpandable: true - ChatCompletionRequestAssistantMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartRefusal' - x-oaiExpandable: true - ChatCompletionRequestToolMessageContentPart: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestMessageContentPartText' - x-oaiExpandable: true - ChatCompletionRequestSystemMessage: - title: System message - required: - - content - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The contents of the system message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestSystemMessageContentPart' - description: 'An array of content parts with a defined type. For system messages, only type `text` is supported.' - description: The contents of the system message. - role: - enum: - - system - type: string - description: 'The role of the messages author, in this case `system`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - ChatCompletionRequestUserMessage: - title: User message - required: - - content - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The text contents of the message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestUserMessageContentPart' - description: 'An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model.' - description: "The contents of the user message.\n" - x-oaiExpandable: true - role: - enum: - - user - type: string - description: 'The role of the messages author, in this case `user`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - ChatCompletionRequestAssistantMessage: - title: Assistant message - required: - - role - type: object - properties: - content: - oneOf: - - title: Text content - type: string - description: The contents of the assistant message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessageContentPart' - description: 'An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.' 
- description: "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.\n" - nullable: true - refusal: - type: string - description: The refusal message by the assistant. - nullable: true - role: - enum: - - assistant - type: string - description: 'The role of the messages author, in this case `assistant`.' - name: - type: string - description: An optional name for the participant. Provides the model information to differentiate between participants of the same role. - tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCalls' - function_call: - required: - - arguments - - name - type: object - properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - nullable: true - deprecated: true - FineTuneChatCompletionRequestAssistantMessage: - required: - - role - allOf: - - title: Assistant message - type: object - properties: - weight: - enum: - - 0 - - 1 - type: integer - description: Controls whether the assistant message is trained against (0 or 1) - - $ref: '#/components/schemas/ChatCompletionRequestAssistantMessage' - ChatCompletionRequestToolMessage: - title: Tool message - required: - - role - - content - - tool_call_id - type: object - properties: - role: - enum: - - tool - type: string - description: 'The role of the messages author, in this case `tool`.' - content: - oneOf: - - title: Text content - type: string - description: The contents of the tool message. - - title: Array of content parts - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestToolMessageContentPart' - description: 'An array of content parts with a defined type. For tool messages, only type `text` is supported.' - description: The contents of the tool message. - tool_call_id: - type: string - description: Tool call that this message is responding to. - ChatCompletionRequestFunctionMessage: - title: Function message - required: - - role - - content - - name - type: object - properties: - role: - enum: - - function - type: string - description: 'The role of the messages author, in this case `function`.' - content: - type: string - description: The contents of the function message. - nullable: true - name: - type: string - description: The name of the function to call. - deprecated: true - FunctionParameters: - type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." - ChatCompletionFunctions: - required: - - name - type: object - properties: - description: - type: string - description: 'A description of what the function does, used by the model to choose when and how to call the function.' - name: - type: string - description: 'The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - parameters: - $ref: '#/components/schemas/FunctionParameters' - deprecated: true - ChatCompletionFunctionCallOption: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n" - ChatCompletionTool: - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - $ref: '#/components/schemas/FunctionObject' - FunctionObject: - required: - - name - type: object - properties: - description: - type: string - description: 'A description of what the function does, used by the model to choose when and how to call the function.' - name: - type: string - description: 'The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - parameters: - $ref: '#/components/schemas/FunctionParameters' - strict: - type: boolean - description: 'Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling).' - default: false - nullable: true - ResponseFormatText: - required: - - type - type: object - properties: - type: - enum: - - text - type: string - description: 'The type of response format being defined: `text`' - ResponseFormatJsonObject: - required: - - type - type: object - properties: - type: - enum: - - json_object - type: string - description: 'The type of response format being defined: `json_object`' - ResponseFormatJsonSchemaSchema: - type: object - description: 'The schema for the response format, described as a JSON Schema object.' - ResponseFormatJsonSchema: - required: - - type - - json_schema - type: object - properties: - type: - enum: - - json_schema - type: string - description: 'The type of response format being defined: `json_schema`' - json_schema: - required: - - type - - name - type: object - properties: - description: - type: string - description: 'A description of what the response format is for, used by the model to determine how to respond in the format.' - name: - type: string - description: 'The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.' - schema: - $ref: '#/components/schemas/ResponseFormatJsonSchemaSchema' - strict: - type: boolean - description: 'Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs).' - default: false - nullable: true - ChatCompletionToolChoiceOption: - oneOf: - - enum: - - none - - auto - - required - type: string - description: "`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools.\n" - - $ref: '#/components/schemas/ChatCompletionNamedToolChoice' - description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present.\n" - x-oaiExpandable: true - ChatCompletionNamedToolChoice: - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: Specifies a tool the model should use. Use to force the model to call a specific function. - ParallelToolCalls: - type: boolean - description: 'Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.' - nullable: true - ChatCompletionMessageToolCalls: - type: array - items: - $ref: '#/components/schemas/ChatCompletionMessageToolCall' - description: 'The tool calls generated by the model, such as function calls.' - ChatCompletionMessageToolCall: - required: - - id - - type - - function - type: object - properties: - id: - type: string - description: The ID of the tool call. - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - required: - - name - - arguments - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - description: The function that the model called. - ChatCompletionMessageToolCallChunk: - required: - - index - type: object - properties: - index: - type: integer - id: - type: string - description: The ID of the tool call. - type: - enum: - - function - type: string - description: 'The type of the tool. Currently, only `function` is supported.' - function: - type: object - properties: - name: - type: string - description: The name of the function to call. - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - ChatCompletionRole: - enum: - - system - - user - - assistant - - tool - - function - type: string - description: The role of the author of a message - ChatCompletionStreamOptions: - type: object - properties: - include_usage: - type: boolean - description: "If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. 
All other chunks will also include a `usage` field, but with a null value.\n" - description: "Options for streaming response. Only set this when you set `stream: true`.\n" - default: - nullable: true - ChatCompletionResponseMessage: - required: - - role - - content - - refusal - type: object - properties: - content: - type: string - description: The contents of the message. - nullable: true - refusal: - type: string - description: The refusal message generated by the model. - nullable: true - tool_calls: - $ref: '#/components/schemas/ChatCompletionMessageToolCalls' - role: - enum: - - assistant - type: string - description: The role of the author of this message. - function_call: - required: - - name - - arguments - type: object - properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - deprecated: true - description: A chat completion message generated by the model. - ChatCompletionStreamResponseDelta: - type: object - properties: - content: - type: string - description: The contents of the chunk message. - nullable: true - function_call: - type: object - properties: - arguments: - type: string - description: 'The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.' - name: - type: string - description: The name of the function to call. - description: 'Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.' - deprecated: true - tool_calls: - type: array - items: - $ref: '#/components/schemas/ChatCompletionMessageToolCallChunk' - role: - enum: - - system - - user - - assistant - - tool - type: string - description: The role of the author of this message. - refusal: - type: string - description: The refusal message generated by the model. - nullable: true - description: A chat completion delta generated by streamed model responses. - CreateChatCompletionRequest: - required: - - model - - messages - type: object - properties: - messages: - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionRequestMessage' - description: 'A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).' - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0301 - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'ID of the model to use. 
See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.' - example: gpt-4o - x-oaiTypeLabel: string - frequency_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - logit_bias: - type: object - additionalProperties: - type: integer - description: "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n" - default: - nullable: true - x-oaiTypeLabel: map - logprobs: - type: boolean - description: 'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.' - default: false - nullable: true - top_logprobs: - maximum: 20 - minimum: 0 - type: integer - description: 'An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.' - nullable: true - max_tokens: - type: integer - description: "The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.\n\nThe total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - nullable: true - n: - maximum: 128 - minimum: 1 - type: integer - description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. - default: 1 - nullable: true - example: 1 - presence_penalty: - maximum: 2 - minimum: -2 - type: number - description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)\n" - default: 0 - nullable: true - response_format: - oneOf: - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "An object specifying the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4o mini](/docs/models/gpt-4o-mini), [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. 
Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" - x-oaiExpandable: true - seed: - maximum: 9223372036854775807 - minimum: -9223372036854775808 - type: integer - description: "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n" - nullable: true - x-oaiMeta: - beta: true - service_tier: - enum: - - auto - - default - type: string - description: "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', the system will utilize scale tier credits until they are exhausted.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n" - default: - nullable: true - stop: - oneOf: - - type: string - nullable: true - - maxItems: 4 - minItems: 1 - type: array - items: - type: string - description: "Up to 4 sequences where the API will stop generating further tokens.\n" - default: - stream: - type: boolean - description: "If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n" - default: false - nullable: true - stream_options: - $ref: '#/components/schemas/ChatCompletionStreamOptions' - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n" - default: 1 - nullable: true - example: 1 - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: "A list of tools the model may call. 
Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n" - tool_choice: - $ref: '#/components/schemas/ChatCompletionToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - function_call: - oneOf: - - enum: - - none - - auto - type: string - description: "`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.\n" - - $ref: '#/components/schemas/ChatCompletionFunctionCallOption' - description: "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n`none` means the model will not call a function and instead generates a message.\n`auto` means the model can pick between generating a message or calling a function.\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.\n\n`none` is the default when no functions are present. `auto` is the default if functions are present.\n" - deprecated: true - x-oaiExpandable: true - functions: - maxItems: 128 - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n" - deprecated: true - CreateChatCompletionResponse: - required: - - choices - - created - - id - - model - - object - type: object - properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - items: - required: - - finish_reason - - index - - message - type: object - properties: - finish_reason: - enum: - - stop - - length - - tool_calls - - content_filter - - function_call - type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' - logprobs: - required: - - content - - refusal - type: object - properties: - content: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message content tokens with log probability information. - nullable: true - refusal: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message refusal tokens with log probability information. - nullable: true - description: Log probability information for the choice. - nullable: true - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. 
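A sketch of the `tools`/`tool_choice` round trip described above, using the official `openai` Python package; the weather function and its JSON Schema are purely illustrative:

```python
# Sketch: declare one function tool, force it with a named tool_choice,
# then read the tool call back from the first choice.
import json
from openai import OpenAI

client = OpenAI()

tools = [{
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}]

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Weather in Boston?"}],
    tools=tools,
    tool_choice={"type": "function", "function": {"name": "get_current_weather"}},
)

choice = completion.choices[0]
if choice.message.tool_calls:
    call = choice.message.tool_calls[0]
    # Arguments arrive as a JSON string and may be imperfect; validate before use.
    args = json.loads(call.function.arguments)
    print(choice.finish_reason, call.function.name, args)
```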
- service_tier: - enum: - - scale - - default - type: string - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - nullable: true - example: scale - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - chat.completion - type: string - description: 'The object type, which is always `chat.completion`.' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: 'Represents a chat completion response returned by model, based on the provided input.' - x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nHello there, how may I assist you today?\",\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - CreateChatCompletionFunctionResponse: - required: - - choices - - created - - id - - model - - object - type: object - properties: - id: - type: string - description: A unique identifier for the chat completion. - choices: - type: array - items: - required: - - finish_reason - - index - - message - - logprobs - type: object - properties: - finish_reason: - enum: - - stop - - length - - function_call - - content_filter - type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function.\n" - index: - type: integer - description: The index of the choice in the list of choices. - message: - $ref: '#/components/schemas/ChatCompletionResponseMessage' - description: A list of chat completion choices. Can be more than one if `n` is greater than 1. - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. - model: - type: string - description: The model used for the chat completion. - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - chat.completion - type: string - description: 'The object type, which is always `chat.completion`.' - usage: - $ref: '#/components/schemas/CompletionUsage' - description: 'Represents a chat completion response returned by model, based on the provided input.' 
- x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-abc123\",\n \"object\": \"chat.completion\",\n \"created\": 1699896916,\n \"model\": \"gpt-4o-mini\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n \"id\": \"call_abc123\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\n\\\"location\\\": \\\"Boston, MA\\\"\\n}\"\n }\n }\n ]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 82,\n \"completion_tokens\": 17,\n \"total_tokens\": 99\n }\n}\n" - ChatCompletionTokenLogprob: - required: - - token - - logprob - - bytes - - top_logprobs - type: object - properties: - token: - type: string - description: The token. - logprob: - type: number - description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' - bytes: - type: array - items: - type: integer - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - nullable: true - top_logprobs: - type: array - items: - required: - - token - - logprob - - bytes - type: object - properties: - token: - type: string - description: The token. - logprob: - type: number - description: 'The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.' - bytes: - type: array - items: - type: integer - description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. - nullable: true - description: 'List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned.' - ListPaginatedFineTuningJobsResponse: - required: - - object - - data - - has_more - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJob' - has_more: - type: boolean - object: - enum: - - list - type: string - CreateChatCompletionStreamResponse: - required: - - choices - - created - - id - - model - - object - type: object - properties: - id: - type: string - description: A unique identifier for the chat completion. Each chunk has the same ID. - choices: - type: array - items: - required: - - delta - - finish_reason - - index - type: object - properties: - delta: - $ref: '#/components/schemas/ChatCompletionStreamResponseDelta' - logprobs: - required: - - content - - refusal - type: object - properties: - content: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message content tokens with log probability information. - nullable: true - refusal: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTokenLogprob' - description: A list of message refusal tokens with log probability information. 
- nullable: true - description: Log probability information for the choice. - nullable: true - finish_reason: - enum: - - stop - - length - - tool_calls - - content_filter - - function_call - type: string - description: "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" - nullable: true - index: - type: integer - description: The index of the choice in the list of choices. - description: "A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`.\n" - created: - type: integer - description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - model: - type: string - description: The model used to generate the completion. - service_tier: - enum: - - scale - - default - type: string - description: The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - nullable: true - example: scale - system_fingerprint: - type: string - description: "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n" - object: - enum: - - chat.completion.chunk - type: string - description: 'The object type, which is always `chat.completion.chunk`.' - usage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - description: "An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request.\nWhen present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n" - description: 'Represents a streamed chunk of a chat completion response returned by the model, based on the provided input.' 
- x-oaiMeta: - name: The chat completion chunk object - group: chat - example: "{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"role\":\"assistant\",\"content\":\"\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{\"content\":\"Hello\"},\"logprobs\":null,\"finish_reason\":null}]}\n\n....\n\n{\"id\":\"chatcmpl-123\",\"object\":\"chat.completion.chunk\",\"created\":1694268190,\"model\":\"gpt-4o-mini\", \"system_fingerprint\": \"fp_44709d6fcb\", \"choices\":[{\"index\":0,\"delta\":{},\"logprobs\":null,\"finish_reason\":\"stop\"}]}\n" - CreateChatCompletionImageResponse: - type: object - description: 'Represents a chat completion response returned by the model, based on the provided image input.' - x-oaiMeta: - name: The chat completion object - group: chat - example: "{\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"model\": \"gpt-4o-mini\",\n \"system_fingerprint\": \"fp_44709d6fcb\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"\\n\\nThis image shows a wooden boardwalk extending through a lush green marshland.\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }],\n \"usage\": {\n \"prompt_tokens\": 9,\n \"completion_tokens\": 12,\n \"total_tokens\": 21\n }\n}\n" - CreateImageRequest: - required: - - prompt - type: object - properties: - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. - example: A cute baby sea otter - model: - anyOf: - - type: string - - enum: - - dall-e-2 - - dall-e-3 - type: string - description: The model to use for image generation. - default: dall-e-2 - nullable: true - example: dall-e-3 - x-oaiTypeLabel: string - n: - maximum: 10 - minimum: 1 - type: integer - description: 'The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.' - default: 1 - nullable: true - example: 1 - quality: - enum: - - standard - - hd - type: string - description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. - default: standard - example: standard - response_format: - enum: - - url - - b64_json - type: string - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - default: url - nullable: true - example: url - size: - enum: - - 256x256 - - 512x512 - - 1024x1024 - - 1792x1024 - - 1024x1792 - type: string - description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.' - default: 1024x1024 - nullable: true - example: 1024x1024 - style: - enum: - - vivid - - natural - type: string - description: 'The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. 
Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`.' - default: vivid - nullable: true - example: vivid - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - ImagesResponse: - required: - - created - - data - properties: - created: - type: integer - data: - type: array - items: - $ref: '#/components/schemas/Image' - Image: - type: object - properties: - b64_json: - type: string - description: 'The base64-encoded JSON of the generated image, if `response_format` is `b64_json`.' - url: - type: string - description: 'The URL of the generated image, if `response_format` is `url` (default).' - revised_prompt: - type: string - description: 'The prompt that was used to generate the image, if there was any revision to the prompt.' - description: Represents the url or the content of an image generated by the OpenAI API. - x-oaiMeta: - name: The image object - example: "{\n \"url\": \"...\",\n \"revised_prompt\": \"...\"\n}\n" - CreateImageEditRequest: - required: - - prompt - - image - type: object - properties: - image: - type: string - description: 'The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.' - format: binary - prompt: - type: string - description: A text description of the desired image(s). The maximum length is 1000 characters. - example: A cute baby sea otter wearing a beret - mask: - type: string - description: 'An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.' - format: binary - model: - anyOf: - - type: string - - enum: - - dall-e-2 - type: string - description: The model to use for image generation. Only `dall-e-2` is supported at this time. - default: dall-e-2 - nullable: true - example: dall-e-2 - x-oaiTypeLabel: string - n: - maximum: 10 - minimum: 1 - type: integer - description: The number of images to generate. Must be between 1 and 10. - default: 1 - nullable: true - example: 1 - size: - enum: - - 256x256 - - 512x512 - - 1024x1024 - type: string - description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.' - default: 1024x1024 - nullable: true - example: 1024x1024 - response_format: - enum: - - url - - b64_json - type: string - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - default: url - nullable: true - example: url - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - CreateImageVariationRequest: - required: - - image - type: object - properties: - image: - type: string - description: 'The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.' - format: binary - model: - anyOf: - - type: string - - enum: - - dall-e-2 - type: string - description: The model to use for image generation. Only `dall-e-2` is supported at this time. 
- default: dall-e-2 - nullable: true - example: dall-e-2 - x-oaiTypeLabel: string - n: - maximum: 10 - minimum: 1 - type: integer - description: 'The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.' - default: 1 - nullable: true - example: 1 - response_format: - enum: - - url - - b64_json - type: string - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. - default: url - nullable: true - example: url - size: - enum: - - 256x256 - - 512x512 - - 1024x1024 - type: string - description: 'The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.' - default: 1024x1024 - nullable: true - example: 1024x1024 - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - CreateModerationRequest: - required: - - input - type: object - properties: - input: - oneOf: - - type: string - default: '' - example: I want to kill them. - - type: array - items: - type: string - default: '' - example: I want to kill them. - description: The input text to classify - model: - anyOf: - - type: string - - enum: - - text-moderation-latest - - text-moderation-stable - type: string - description: "Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.\n\nThe default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.\n" - default: text-moderation-latest - example: text-moderation-stable - x-oaiTypeLabel: string - CreateModerationResponse: - required: - - id - - model - - results - type: object - properties: - id: - type: string - description: The unique identifier for the moderation request. - model: - type: string - description: The model used to generate the moderation results. - results: - type: array - items: - required: - - flagged - - categories - - category_scores - type: object - properties: - flagged: - type: boolean - description: Whether any of the below categories are flagged. - categories: - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - type: object - properties: - hate: - type: boolean - description: 'Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment.' - hate/threatening: - type: boolean - description: 'Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.' - harassment: - type: boolean - description: 'Content that expresses, incites, or promotes harassing language towards any target.' - harassment/threatening: - type: boolean - description: Harassment content that also includes violence or serious harm towards any target. 
- self-harm: - type: boolean - description: 'Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.' - self-harm/intent: - type: boolean - description: 'Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.' - self-harm/instructions: - type: boolean - description: 'Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.' - sexual: - type: boolean - description: 'Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).' - sexual/minors: - type: boolean - description: Sexual content that includes an individual who is under 18 years old. - violence: - type: boolean - description: 'Content that depicts death, violence, or physical injury.' - violence/graphic: - type: boolean - description: 'Content that depicts death, violence, or physical injury in graphic detail.' - description: 'A list of the categories, and whether they are flagged or not.' - category_scores: - required: - - hate - - hate/threatening - - harassment - - harassment/threatening - - self-harm - - self-harm/intent - - self-harm/instructions - - sexual - - sexual/minors - - violence - - violence/graphic - type: object - properties: - hate: - type: number - description: The score for the category 'hate'. - hate/threatening: - type: number - description: The score for the category 'hate/threatening'. - harassment: - type: number - description: The score for the category 'harassment'. - harassment/threatening: - type: number - description: The score for the category 'harassment/threatening'. - self-harm: - type: number - description: The score for the category 'self-harm'. - self-harm/intent: - type: number - description: The score for the category 'self-harm/intent'. - self-harm/instructions: - type: number - description: The score for the category 'self-harm/instructions'. - sexual: - type: number - description: The score for the category 'sexual'. - sexual/minors: - type: number - description: The score for the category 'sexual/minors'. - violence: - type: number - description: The score for the category 'violence'. - violence/graphic: - type: number - description: The score for the category 'violence/graphic'. - description: A list of the categories along with their scores as predicted by model. - description: A list of moderation objects. - description: Represents if a given text input is potentially harmful. 
- x-oaiMeta: - name: The moderation object - example: "{\n \"id\": \"modr-XXXXX\",\n \"model\": \"text-moderation-005\",\n \"results\": [\n {\n \"flagged\": true,\n \"categories\": {\n \"sexual\": false,\n \"hate\": false,\n \"harassment\": false,\n \"self-harm\": false,\n \"sexual/minors\": false,\n \"hate/threatening\": false,\n \"violence/graphic\": false,\n \"self-harm/intent\": false,\n \"self-harm/instructions\": false,\n \"harassment/threatening\": true,\n \"violence\": true,\n },\n \"category_scores\": {\n \"sexual\": 1.2282071e-06,\n \"hate\": 0.010696256,\n \"harassment\": 0.29842457,\n \"self-harm\": 1.5236925e-08,\n \"sexual/minors\": 5.7246268e-08,\n \"hate/threatening\": 0.0060676364,\n \"violence/graphic\": 4.435014e-06,\n \"self-harm/intent\": 8.098441e-10,\n \"self-harm/instructions\": 2.8498655e-11,\n \"harassment/threatening\": 0.63055265,\n \"violence\": 0.99011886,\n }\n }\n ]\n}\n" - ListFilesResponse: - required: - - object - - data - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/OpenAIFile' - object: - enum: - - list - type: string - CreateFileRequest: - required: - - file - - purpose - type: object - properties: - file: - type: string - description: "The File object (not file name) to be uploaded.\n" - format: binary - purpose: - enum: - - assistants - - batch - - fine-tune - - vision - type: string - description: "The intended purpose of the uploaded file.\n\nUse \"assistants\" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, \"vision\" for Assistants image file inputs, \"batch\" for [Batch API](/docs/guides/batch), and \"fine-tune\" for [Fine-tuning](/docs/api-reference/fine-tuning).\n" - additionalProperties: false - DeleteFileResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - object: - enum: - - file - type: string - deleted: - type: boolean - CreateUploadRequest: - required: - - filename - - purpose - - bytes - - mime_type - type: object - properties: - filename: - type: string - description: "The name of the file to upload.\n" - purpose: - enum: - - assistants - - batch - - fine-tune - - vision - type: string - description: "The intended purpose of the uploaded file.\n\nSee the [documentation on File purposes](/docs/api-reference/files/create#files-create-purpose).\n" - bytes: - type: integer - description: "The number of bytes in the file you are uploading.\n" - mime_type: - type: string - description: "The MIME type of the file.\n\nThis must fall within the supported MIME types for your file purpose. 
See the supported MIME types for assistants and vision.\n" - additionalProperties: false - AddUploadPartRequest: - required: - - data - type: object - properties: - data: - type: string - description: "The chunk of bytes for this Part.\n" - format: binary - additionalProperties: false - CompleteUploadRequest: - required: - - part_ids - type: object - properties: - part_ids: - type: array - items: - type: string - description: "The ordered list of Part IDs.\n" - md5: - type: string - description: "The optional md5 checksum for the file contents to verify if the bytes uploaded matches what you expect.\n" - additionalProperties: false - CancelUploadRequest: - type: object - additionalProperties: false - CreateFineTuningJobRequest: - required: - - model - - training_file - type: object - properties: - model: - anyOf: - - type: string - - enum: - - babbage-002 - - davinci-002 - - gpt-3.5-turbo - - gpt-4o-mini - type: string - description: "The name of the model to fine-tune. You can select one of the\n[supported models](/docs/guides/fine-tuning/which-models-can-be-fine-tuned).\n" - example: gpt-4o-mini - x-oaiTypeLabel: string - training_file: - type: string - description: "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) format.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" - example: file-abc123 - hyperparameters: - type: object - properties: - batch_size: - oneOf: - - enum: - - auto - type: string - - maximum: 256 - minimum: 1 - type: integer - description: "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.\n" - default: auto - learning_rate_multiplier: - oneOf: - - enum: - - auto - type: string - - minimum: 0 - exclusiveMinimum: true - type: number - description: "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.\n" - default: auto - n_epochs: - oneOf: - - enum: - - auto - type: string - - maximum: 50 - minimum: 1 - type: integer - description: "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.\n" - default: auto - description: The hyperparameters used for the fine-tuning job. - suffix: - maxLength: 40 - minLength: 1 - type: string - description: "A string of up to 18 characters that will be added to your fine-tuned model name.\n\nFor example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.\n" - default: - nullable: true - validation_file: - type: string - description: "The ID of an uploaded file that contains validation data.\n\nIf you provide this file, the data is used to generate validation\nmetrics periodically during fine-tuning. These metrics can be viewed in\nthe fine-tuning results file.\nThe same data should not be present in both train and validation files.\n\nYour dataset must be formatted as a JSONL file. 
You must upload your file with the purpose `fine-tune`.\n\nSee the [fine-tuning guide](/docs/guides/fine-tuning) for more details.\n" - nullable: true - example: file-abc123 - integrations: - type: array - items: - required: - - type - - wandb - type: object - properties: - type: - oneOf: - - enum: - - wandb - type: string - description: "The type of integration to enable. Currently, only \"wandb\" (Weights and Biases) is supported.\n" - wandb: - required: - - project - type: object - properties: - project: - type: string - description: "The name of the project that the new run will be created under.\n" - example: my-wandb-project - name: - type: string - description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" - nullable: true - entity: - type: string - description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" - nullable: true - tags: - type: array - items: - type: string - example: custom-tag - description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" - description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" - description: A list of integrations to enable for your fine-tuning job. - nullable: true - seed: - maximum: 2147483647 - minimum: 0 - type: integer - description: "The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\nIf a seed is not specified, one will be generated for you.\n" - nullable: true - example: 42 - ListFineTuningJobEventsResponse: - required: - - object - - data - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobEvent' - object: - enum: - - list - type: string - ListFineTuningJobCheckpointsResponse: - required: - - object - - data - - has_more - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/FineTuningJobCheckpoint' - object: - enum: - - list - type: string - first_id: - type: string - nullable: true - last_id: - type: string - nullable: true - has_more: - type: boolean - CreateEmbeddingRequest: - required: - - model - - input - type: object - properties: - input: - oneOf: - - title: string - type: string - description: The string that will be turned into an embedding. - default: '' - example: This is a test. - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - type: string - default: '' - example: '[''This is a test.'']' - description: The array of strings that will be turned into an embedding. - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - type: integer - description: The array of integers that will be turned into an embedding. - example: '[1212, 318, 257, 1332, 13]' - - title: array - maxItems: 2048 - minItems: 1 - type: array - items: - minItems: 1 - type: array - items: - type: integer - description: The array of arrays containing integers that will be turned into an embedding. 
- example: '[[1212, 318, 257, 1332, 13]]' - description: "Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n" - example: The quick brown fox jumped over the lazy dog - x-oaiExpandable: true - model: - anyOf: - - type: string - - enum: - - text-embedding-ada-002 - - text-embedding-3-small - - text-embedding-3-large - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - example: text-embedding-3-small - x-oaiTypeLabel: string - encoding_format: - enum: - - float - - base64 - type: string - description: 'The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).' - default: float - example: float - dimensions: - minimum: 1 - type: integer - description: "The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models.\n" - nullable: true - user: - type: string - description: "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n" - example: user-1234 - additionalProperties: false - CreateEmbeddingResponse: - required: - - object - - model - - data - - usage - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Embedding' - description: The list of embeddings generated by the model. - model: - type: string - description: The name of the model used to generate the embedding. - object: - enum: - - list - type: string - description: 'The object type, which is always "list".' - usage: - required: - - prompt_tokens - - total_tokens - type: object - properties: - prompt_tokens: - type: integer - description: The number of tokens used by the prompt. - total_tokens: - type: integer - description: The total number of tokens used by the request. - description: The usage information for the request. - CreateTranscriptionRequest: - required: - - file - - model - type: object - properties: - file: - type: string - description: "The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n" - format: binary - x-oaiTypeLabel: file - model: - anyOf: - - type: string - - enum: - - whisper-1 - type: string - description: "ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.\n" - example: whisper-1 - x-oaiTypeLabel: string - language: - type: string - description: "The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.\n" - prompt: - type: string - description: "An optional text to guide the model's style or continue a previous audio segment. 
The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.\n" - response_format: - enum: - - json - - text - - srt - - verbose_json - - vtt - type: string - description: "The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" - default: json - temperature: - type: number - description: "The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.\n" - default: 0 - 'timestamp_granularities[]': - type: array - items: - enum: - - word - - segment - type: string - description: "The timestamp granularities to populate for this transcription. `response_format` must be set to `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.\n" - default: - - segment - additionalProperties: false - CreateTranscriptionResponseJson: - required: - - text - type: object - properties: - text: - type: string - description: The transcribed text. - description: 'Represents a transcription response returned by the model, based on the provided input.' - x-oaiMeta: - name: The transcription object (JSON) - group: audio - example: "{\n \"text\": \"Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that.\"\n}\n" - TranscriptionSegment: - required: - - id - - seek - - start - - end - - text - - tokens - - temperature - - avg_logprob - - compression_ratio - - no_speech_prob - type: object - properties: - id: - type: integer - description: Unique identifier of the segment. - seek: - type: integer - description: Seek offset of the segment. - start: - type: number - description: Start time of the segment in seconds. - format: float - end: - type: number - description: End time of the segment in seconds. - format: float - text: - type: string - description: Text content of the segment. - tokens: - type: array - items: - type: integer - description: Array of token IDs for the text content. - temperature: - type: number - description: Temperature parameter used for generating the segment. - format: float - avg_logprob: - type: number - description: 'Average logprob of the segment. If the value is lower than -1, consider the logprobs failed.' - format: float - compression_ratio: - type: number - description: 'Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed.' - format: float - no_speech_prob: - type: number - description: 'Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent.' - format: float - TranscriptionWord: - required: - - word - - start - - end - type: object - properties: - word: - type: string - description: The text content of the word. - start: - type: number - description: Start time of the word in seconds. - format: float - end: - type: number - description: End time of the word in seconds. 
- format: float - CreateTranscriptionResponseVerboseJson: - required: - - language - - duration - - text - type: object - properties: - language: - type: string - description: The language of the input audio. - duration: - type: string - description: The duration of the input audio. - text: - type: string - description: The transcribed text. - words: - type: array - items: - $ref: '#/components/schemas/TranscriptionWord' - description: Extracted words and their corresponding timestamps. - segments: - type: array - items: - $ref: '#/components/schemas/TranscriptionSegment' - description: Segments of the transcribed text and their corresponding details. - description: 'Represents a verbose json transcription response returned by the model, based on the provided input.' - x-oaiMeta: - name: The transcription object (Verbose JSON) - group: audio - example: "{\n \"task\": \"transcribe\",\n \"language\": \"english\",\n \"duration\": 8.470000267028809,\n \"text\": \"The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.\",\n \"segments\": [\n {\n \"id\": 0,\n \"seek\": 0,\n \"start\": 0.0,\n \"end\": 3.319999933242798,\n \"text\": \" The beach was a popular spot on a hot summer day.\",\n \"tokens\": [\n 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530\n ],\n \"temperature\": 0.0,\n \"avg_logprob\": -0.2860786020755768,\n \"compression_ratio\": 1.2363636493682861,\n \"no_speech_prob\": 0.00985979475080967\n },\n ...\n ]\n}\n" - CreateTranslationRequest: - required: - - file - - model - type: object - properties: - file: - type: string - description: "The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n" - format: binary - x-oaiTypeLabel: file - model: - anyOf: - - type: string - - enum: - - whisper-1 - type: string - description: "ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.\n" - example: whisper-1 - x-oaiTypeLabel: string - prompt: - type: string - description: "An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.\n" - response_format: - type: string - description: "The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.\n" - default: json - temperature: - type: number - description: "The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.\n" - default: 0 - additionalProperties: false - CreateTranslationResponseJson: - required: - - text - type: object - properties: - text: - type: string - CreateTranslationResponseVerboseJson: - required: - - language - - duration - - text - type: object - properties: - language: - type: string - description: The language of the output translation (always `english`). - duration: - type: string - description: The duration of the input audio. - text: - type: string - description: The translated text. - segments: - type: array - items: - $ref: '#/components/schemas/TranscriptionSegment' - description: Segments of the translated text and their corresponding details. 
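# Illustrative usage (not part of the spec): a minimal Python sketch of a translation request matching the CreateTranslationRequest schema above, assuming the official `openai` Python client; the audio file name is hypothetical.
#
#   from openai import OpenAI
#
#   client = OpenAI()
#   translation = client.audio.translations.create(
#       model="whisper-1",                  # only whisper-1 is currently available
#       file=open("speech_fr.mp3", "rb"),   # hypothetical input audio file
#   )
#   print(translation.text)                 # English translation of the audio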
- CreateSpeechRequest: - required: - - model - - input - - voice - type: object - properties: - model: - anyOf: - - type: string - - enum: - - tts-1 - - tts-1-hd - type: string - description: "One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`\n" - x-oaiTypeLabel: string - input: - maxLength: 4096 - type: string - description: The text to generate audio for. The maximum length is 4096 characters. - voice: - enum: - - alloy - - echo - - fable - - onyx - - nova - - shimmer - type: string - description: 'The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options).' - response_format: - enum: - - mp3 - - opus - - aac - - flac - - wav - - pcm - type: string - description: 'The format in which to return the audio. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.' - default: mp3 - speed: - maximum: 4.0 - minimum: 0.25 - type: number - description: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. - default: 1 - additionalProperties: false - Model: - title: Model - required: - - id - - object - - created - - owned_by - properties: - id: - type: string - description: 'The model identifier, which can be referenced in the API endpoints.' - created: - type: integer - description: The Unix timestamp (in seconds) when the model was created. - object: - enum: - - model - type: string - description: 'The object type, which is always "model".' - owned_by: - type: string - description: The organization that owns the model. - description: Describes an OpenAI model offering that can be used with the API. - x-oaiMeta: - name: The model object - example: "{\n \"id\": \"VAR_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" - OpenAIFile: - title: OpenAIFile - required: - - id - - object - - bytes - - created_at - - filename - - purpose - - status - properties: - id: - type: string - description: 'The file identifier, which can be referenced in the API endpoints.' - bytes: - type: integer - description: 'The size of the file, in bytes.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the file was created. - filename: - type: string - description: The name of the file. - object: - enum: - - file - type: string - description: 'The object type, which is always `file`.' - purpose: - enum: - - assistants - - assistants_output - - batch - - batch_output - - fine-tune - - fine-tune-results - - vision - type: string - description: 'The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`.' - status: - enum: - - uploaded - - processed - - error - type: string - description: 'Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`.' - deprecated: true - status_details: - type: string - description: 'Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.' - deprecated: true - description: The `File` object represents a document that has been uploaded to OpenAI. 
- x-oaiMeta: - name: The file object - example: "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\"\n}\n" - Upload: - title: Upload - required: - - bytes - - created_at - - expires_at - - filename - - id - - purpose - - status - - step_number - type: object - properties: - id: - type: string - description: 'The Upload unique identifier, which can be referenced in API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload was created. - filename: - type: string - description: The name of the file to be uploaded. - bytes: - type: integer - description: The intended number of bytes to be uploaded. - purpose: - type: string - description: 'The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values.' - status: - enum: - - pending - - completed - - cancelled - - expired - type: string - description: The status of the Upload. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the Upload will expire. - object: - enum: - - upload - type: string - description: 'The object type, which is always "upload".' - file: - $ref: '#/components/schemas/OpenAIFile' - description: "The Upload object can accept byte chunks in the form of Parts.\n" - x-oaiMeta: - name: The upload object - example: "{\n \"id\": \"upload_abc123\",\n \"object\": \"upload\",\n \"bytes\": 2147483648,\n \"created_at\": 1719184911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\",\n \"status\": \"completed\",\n \"expires_at\": 1719127296,\n \"file\": {\n \"id\": \"file-xyz321\",\n \"object\": \"file\",\n \"bytes\": 2147483648,\n \"created_at\": 1719186911,\n \"filename\": \"training_examples.jsonl\",\n \"purpose\": \"fine-tune\"\n }\n}\n" - UploadPart: - title: UploadPart - required: - - created_at - - id - - object - - upload_id - type: object - properties: - id: - type: string - description: 'The upload Part unique identifier, which can be referenced in API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the Part was created. - upload_id: - type: string - description: The ID of the Upload object that this Part was added to. - object: - enum: - - upload.part - type: string - description: 'The object type, which is always `upload.part`.' - description: "The upload Part represents a chunk of bytes we can add to an Upload object.\n" - x-oaiMeta: - name: The upload part object - example: "{\n \"id\": \"part_def456\",\n \"object\": \"upload.part\",\n \"created_at\": 1719186911,\n \"upload_id\": \"upload_abc123\"\n}\n" - Embedding: - required: - - index - - object - - embedding - type: object - properties: - index: - type: integer - description: The index of the embedding in the list of embeddings. - embedding: - type: array - items: - type: number - description: "The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings).\n" - object: - enum: - - embedding - type: string - description: 'The object type, which is always "embedding".' - description: "Represents an embedding vector returned by embedding endpoint.\n" - x-oaiMeta: - name: The embedding object - example: "{\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... 
(1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n}\n" - FineTuningJob: - title: FineTuningJob - required: - - created_at - - error - - finished_at - - fine_tuned_model - - hyperparameters - - id - - model - - object - - organization_id - - result_files - - status - - trained_tokens - - training_file - - validation_file - - seed - type: object - properties: - id: - type: string - description: 'The object identifier, which can be referenced in the API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was created. - error: - required: - - code - - message - - param - type: object - properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - param: - type: string - description: 'The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific.' - nullable: true - description: 'For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure.' - nullable: true - fine_tuned_model: - type: string - description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. - nullable: true - finished_at: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. - nullable: true - hyperparameters: - required: - - n_epochs - type: object - properties: - n_epochs: - oneOf: - - enum: - - auto - type: string - - maximum: 50 - minimum: 1 - type: integer - description: "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.\n\"auto\" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs." - default: auto - description: 'The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.' - model: - type: string - description: The base model that is being fine-tuned. - object: - enum: - - fine_tuning.job - type: string - description: 'The object type, which is always "fine_tuning.job".' - organization_id: - type: string - description: The organization that owns the fine-tuning job. - result_files: - type: array - items: - type: string - example: file-abc123 - description: 'The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents).' - status: - enum: - - validating_files - - queued - - running - - succeeded - - failed - - cancelled - type: string - description: 'The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.' - trained_tokens: - type: integer - description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. - nullable: true - training_file: - type: string - description: 'The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents).' - validation_file: - type: string - description: 'The file ID used for validation. 
You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents).' - nullable: true - integrations: - maxItems: 5 - type: array - items: - oneOf: - - $ref: '#/components/schemas/FineTuningIntegration' - x-oaiExpandable: true - description: A list of integrations to enable for this fine-tuning job. - nullable: true - seed: - type: integer - description: The seed used for the fine-tuning job. - estimated_finish: - type: integer - description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. - nullable: true - description: "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.\n" - x-oaiMeta: - name: The fine-tuning job object - example: "{\n \"object\": \"fine_tuning.job\",\n \"id\": \"ftjob-abc123\",\n \"model\": \"davinci-002\",\n \"created_at\": 1692661014,\n \"finished_at\": 1692661190,\n \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n \"organization_id\": \"org-123\",\n \"result_files\": [\n \"file-abc123\"\n ],\n \"status\": \"succeeded\",\n \"validation_file\": null,\n \"training_file\": \"file-abc123\",\n \"hyperparameters\": {\n \"n_epochs\": 4,\n \"batch_size\": 1,\n \"learning_rate_multiplier\": 1.0\n },\n \"trained_tokens\": 5768,\n \"integrations\": [],\n \"seed\": 0,\n \"estimated_finish\": 0\n}\n" - FineTuningIntegration: - title: Fine-Tuning Job Integration - required: - - type - - wandb - type: object - properties: - type: - enum: - - wandb - type: string - description: The type of the integration being enabled for the fine-tuning job - wandb: - required: - - project - type: object - properties: - project: - type: string - description: "The name of the project that the new run will be created under.\n" - example: my-wandb-project - name: - type: string - description: "A display name to set for the run. If not set, we will use the Job ID as the name.\n" - nullable: true - entity: - type: string - description: "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used.\n" - nullable: true - tags: - type: array - items: - type: string - example: custom-tag - description: "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\".\n" - description: "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. 
Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run.\n" - FineTuningJobEvent: - required: - - id - - object - - created_at - - level - - message - type: object - properties: - id: - type: string - created_at: - type: integer - level: - enum: - - info - - warn - - error - type: string - message: - type: string - object: - enum: - - fine_tuning.job.event - type: string - description: Fine-tuning job event object - x-oaiMeta: - name: The fine-tuning job event object - example: "{\n \"object\": \"fine_tuning.job.event\",\n \"id\": \"ftevent-abc123\",\n \"created_at\": 1677610602,\n \"level\": \"info\",\n \"message\": \"Created fine-tuning job\"\n}\n" - FineTuningJobCheckpoint: - title: FineTuningJobCheckpoint - required: - - created_at - - fine_tuning_job_id - - fine_tuned_model_checkpoint - - id - - metrics - - object - - step_number - type: object - properties: - id: - type: string - description: 'The checkpoint identifier, which can be referenced in the API endpoints.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the checkpoint was created. - fine_tuned_model_checkpoint: - type: string - description: The name of the fine-tuned checkpoint model that is created. - step_number: - type: integer - description: The step number that the checkpoint was created at. - metrics: - type: object - properties: - step: - type: number - train_loss: - type: number - train_mean_token_accuracy: - type: number - valid_loss: - type: number - valid_mean_token_accuracy: - type: number - full_valid_loss: - type: number - full_valid_mean_token_accuracy: - type: number - description: Metrics at the step number during the fine-tuning job. - fine_tuning_job_id: - type: string - description: The name of the fine-tuning job that this checkpoint was created from. - object: - enum: - - fine_tuning.job.checkpoint - type: string - description: 'The object type, which is always "fine_tuning.job.checkpoint".' - description: "The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use.\n" - x-oaiMeta: - name: The fine-tuning job checkpoint object - example: "{\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P\",\n \"created_at\": 1712211699,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88\",\n \"fine_tuning_job_id\": \"ftjob-fpbNQ3H1GrMehXRf8cO97xTN\",\n \"metrics\": {\n \"step\": 88,\n \"train_loss\": 0.478,\n \"train_mean_token_accuracy\": 0.924,\n \"valid_loss\": 10.112,\n \"valid_mean_token_accuracy\": 0.145,\n \"full_valid_loss\": 0.567,\n \"full_valid_mean_token_accuracy\": 0.944\n },\n \"step_number\": 88\n}\n" - FinetuneChatRequestInput: - type: object - properties: - messages: - minItems: 1 - type: array - items: - oneOf: - - $ref: '#/components/schemas/ChatCompletionRequestSystemMessage' - - $ref: '#/components/schemas/ChatCompletionRequestUserMessage' - - $ref: '#/components/schemas/FineTuneChatCompletionRequestAssistantMessage' - - $ref: '#/components/schemas/ChatCompletionRequestToolMessage' - - $ref: '#/components/schemas/ChatCompletionRequestFunctionMessage' - x-oaiExpandable: true - tools: - type: array - items: - $ref: '#/components/schemas/ChatCompletionTool' - description: A list of tools the model may generate JSON inputs for. 
- parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - functions: - maxItems: 128 - minItems: 1 - type: array - items: - $ref: '#/components/schemas/ChatCompletionFunctions' - description: A list of functions the model may generate JSON inputs for. - deprecated: true - description: The per-line training example of a fine-tuning input file for chat models - x-oaiMeta: - name: Training format for chat models - example: "{\n \"messages\": [\n { \"role\": \"user\", \"content\": \"What is the weather in San Francisco?\" },\n {\n \"role\": \"assistant\",\n \"tool_calls\": [\n {\n \"id\": \"call_id\",\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"arguments\": \"{\\\"location\\\": \\\"San Francisco, USA\\\", \\\"format\\\": \\\"celsius\\\"}\"\n }\n }\n ]\n }\n ],\n \"parallel_tool_calls\": false,\n \"tools\": [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"get_current_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and country, eg. San Francisco, USA\"\n },\n \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\"] }\n },\n \"required\": [\"location\", \"format\"]\n }\n }\n }\n ]\n}\n" - FinetuneCompletionRequestInput: - type: object - properties: - prompt: - type: string - description: The input prompt for this training example. - completion: - type: string - description: The desired completion for this training example. - description: The per-line training example of a fine-tuning input file for completions models - x-oaiMeta: - name: Training format for completions models - example: "{\n \"prompt\": \"What is the answer to 2+2\",\n \"completion\": \"4\"\n}\n" - CompletionUsage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of tokens in the generated completion. - prompt_tokens: - type: integer - description: Number of tokens in the prompt. - total_tokens: - type: integer - description: Total number of tokens used in the request (prompt + completion). - description: Usage statistics for the completion request. - RunCompletionUsage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - description: 'Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.).' - nullable: true - RunStepCompletionUsage: - required: - - prompt_tokens - - completion_tokens - - total_tokens - type: object - properties: - completion_tokens: - type: integer - description: Number of completion tokens used over the course of the run step. - prompt_tokens: - type: integer - description: Number of prompt tokens used over the course of the run step. - total_tokens: - type: integer - description: Total number of tokens used (prompt + completion). - description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. 
- nullable: true - AssistantsApiResponseFormatOption: - oneOf: - - enum: - - auto - type: string - description: "`auto` is the default value\n" - - $ref: '#/components/schemas/ResponseFormatText' - - $ref: '#/components/schemas/ResponseFormatJsonObject' - - $ref: '#/components/schemas/ResponseFormatJsonSchema' - description: "Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.\n" - x-oaiExpandable: true - AssistantObject: - title: Assistant - required: - - id - - object - - created_at - - name - - description - - model - - instructions - - tools - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - assistant - type: string - description: 'The object type, which is always `assistant`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the assistant was created. - name: - maxLength: 256 - type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 - type: string - description: "The description of the assistant. The maximum length is 512 characters.\n" - nullable: true - model: - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - instructions: - maxLength: 256000 - type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. 
There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n" - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or temperature but not both.\n" - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - description: Represents an `assistant` that can call the model and use tools. - x-oaiMeta: - name: The assistant object - beta: true - example: "{\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698984975,\n \"name\": \"Math Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a personal math tutor. When asked a question, write and run Python code to answer the question.\",\n \"tools\": [\n {\n \"type\": \"code_interpreter\"\n }\n ],\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n}\n" - CreateAssistantRequest: - required: - - model - type: object - properties: - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - example: gpt-4o - x-oaiTypeLabel: string - name: - maxLength: 256 - type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 - type: string - description: "The description of the assistant. 
The maximum length is 512 characters.\n" - nullable: true - instructions: - maxLength: 256000 - type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - oneOf: - - required: - - vector_store_ids - - required: - - vector_stores - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - vector_stores: - maxItems: 1 - type: array - items: - type: object - properties: - file_ids: - maxItems: 10000 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" - chunking_strategy: - type: object - oneOf: - - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - additionalProperties: false - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - x-oaiTypeLabel: map - description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. 
The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ModifyAssistantRequest: - type: object - properties: - model: - anyOf: - - type: string - description: "ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.\n" - name: - maxLength: 256 - type: string - description: "The name of the assistant. The maximum length is 256 characters.\n" - nullable: true - description: - maxLength: 512 - type: string - description: "The description of the assistant. The maximum length is 512 characters.\n" - nullable: true - instructions: - maxLength: 256000 - type: string - description: "The system instructions that the assistant uses. The maximum length is 256,000 characters.\n" - nullable: true - tools: - maxItems: 128 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: "A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.\n" - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - DeleteAssistantResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - assistant.deleted - type: string - ListAssistantsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/AssistantObject' - first_id: - type: string - example: asst_abc123 - last_id: - type: string - example: asst_abc456 - has_more: - type: boolean - example: false - x-oaiMeta: - name: List assistants response object - group: chat - example: "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"asst_abc123\",\n \"object\": \"assistant\",\n \"created_at\": 1698982736,\n \"name\": \"Coding Tutor\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc456\",\n \"object\": \"assistant\",\n \"created_at\": 1698982718,\n \"name\": \"My Assistant\",\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": \"You are a helpful assistant designed to make me better at coding!\",\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n },\n {\n \"id\": \"asst_abc789\",\n \"object\": \"assistant\",\n \"created_at\": 1698982643,\n \"name\": null,\n \"description\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [],\n \"tool_resources\": {},\n \"metadata\": {},\n \"top_p\": 1.0,\n \"temperature\": 1.0,\n \"response_format\": \"auto\"\n }\n ],\n \"first_id\": \"asst_abc123\",\n \"last_id\": \"asst_abc789\",\n \"has_more\": false\n}\n" - AssistantToolsCode: - title: Code interpreter tool - required: - - type - type: object - properties: - type: - enum: - - code_interpreter - type: string - description: 'The type of tool being defined: `code_interpreter`' - AssistantToolsFileSearch: - title: FileSearch tool - required: - - type - type: object - properties: - type: - enum: - - file_search - type: string - description: 'The type of tool being defined: `file_search`' - file_search: - type: object - properties: - max_num_results: - maximum: 50 - minimum: 1 - type: integer - description: "The maximum number of results the file search tool should output. The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between 1 and 50 inclusive.\n\nNote that the file search tool may output fewer than `max_num_results` results. See the [file search tool documentation](/docs/assistants/tools/file-search/number-of-chunks-returned) for more information.\n" - description: Overrides for the file search tool. 
- AssistantToolsFileSearchTypeOnly: - title: FileSearch tool - required: - - type - type: object - properties: - type: - enum: - - file_search - type: string - description: 'The type of tool being defined: `file_search`' - AssistantToolsFunction: - title: Function tool - required: - - type - - function - type: object - properties: - type: - enum: - - function - type: string - description: 'The type of tool being defined: `function`' - function: - $ref: '#/components/schemas/FunctionObject' - TruncationObject: - title: Thread Truncation Controls - required: - - type - type: object - properties: - type: - enum: - - auto - - last_messages - type: string - description: 'The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.' - last_messages: - minimum: 1 - type: integer - description: The number of most recent messages from the thread when constructing the context for the run. - nullable: true - description: Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run. - AssistantsApiToolChoiceOption: - oneOf: - - enum: - - none - - auto - - required - type: string - description: "`none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user.\n" - - $ref: '#/components/schemas/AssistantsNamedToolChoice' - description: "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tools and instead generates a message.\n`auto` is the default value and means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools before responding to the user.\nSpecifying a particular tool like `{\"type\": \"file_search\"}` or `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n" - x-oaiExpandable: true - AssistantsNamedToolChoice: - required: - - type - type: object - properties: - type: - enum: - - function - - code_interpreter - - file_search - type: string - description: 'The type of the tool. If type is `function`, the function name must be set' - function: - required: - - name - type: object - properties: - name: - type: string - description: The name of the function to call. - description: Specifies a tool the model should use. Use to force the model to call a specific tool. - RunObject: - title: A run on a thread - required: - - id - - object - - created_at - - thread_id - - assistant_id - - status - - required_action - - last_error - - expires_at - - started_at - - cancelled_at - - failed_at - - completed_at - - model - - instructions - - tools - - metadata - - usage - - incomplete_details - - max_prompt_tokens - - max_completion_tokens - - truncation_strategy - - tool_choice - - parallel_tool_calls - - response_format - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - thread.run - type: string - description: 'The object type, which is always `thread.run`.' 
- created_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was created. - thread_id: - type: string - description: 'The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run.' - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run.' - status: - enum: - - queued - - in_progress - - requires_action - - cancelling - - cancelled - - failed - - completed - - incomplete - - expired - type: string - description: 'The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`.' - required_action: - required: - - type - - submit_tool_outputs - type: object - properties: - type: - enum: - - submit_tool_outputs - type: string - description: 'For now, this is always `submit_tool_outputs`.' - submit_tool_outputs: - required: - - tool_calls - type: object - properties: - tool_calls: - type: array - items: - $ref: '#/components/schemas/RunToolCallObject' - description: A list of the relevant tool calls. - description: Details on the tool outputs needed for this run to continue. - description: Details on the action required to continue the run. Will be `null` if no action is required. - nullable: true - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - rate_limit_exceeded - - invalid_prompt - type: string - description: 'One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.' - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this run. Will be `null` if there are no errors. - nullable: true - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the run will expire. - nullable: true - started_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was started. - nullable: true - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was cancelled. - nullable: true - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run failed. - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run was completed. - nullable: true - incomplete_details: - type: object - properties: - reason: - enum: - - max_completion_tokens - - max_prompt_tokens - type: string - description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. - description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. - nullable: true - model: - type: string - description: 'The model that the [assistant](/docs/api-reference/assistants) used for this run.' - instructions: - type: string - description: 'The instructions that the [assistant](/docs/api-reference/assistants) used for this run.' - tools: - maxItems: 20 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: 'The list of tools that the [assistant](/docs/api-reference/assistants) used for this run.' - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - usage: - $ref: '#/components/schemas/RunCompletionUsage' - temperature: - type: number - description: 'The sampling temperature used for this run. If not set, defaults to 1.' - nullable: true - top_p: - type: number - description: 'The nucleus sampling value used for this run. If not set, defaults to 1.' - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens specified to have been used over the course of the run.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens specified to have been used over the course of the run.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - description: 'Represents an execution run on a [thread](/docs/api-reference/threads).' - x-oaiMeta: - name: The run object - beta: true - example: "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1698107661,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699073476,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699073498,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [{\"type\": \"file_search\"}, {\"type\": \"code_interpreter\"}],\n \"metadata\": {},\n \"incomplete_details\": null,\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" - CreateRunRequest: - required: - - thread_id - - assistant_id - type: object - properties: - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' - nullable: true - example: gpt-4o - x-oaiTypeLabel: string - instructions: - type: string - description: 'Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. 
This is useful for modifying the behavior on a per-run basis.' - nullable: true - additional_instructions: - type: string - description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. - nullable: true - additional_messages: - type: array - items: - $ref: '#/components/schemas/CreateMessageRequest' - description: Adds additional messages to the thread before creating the run. - nullable: true - tools: - maxItems: 20 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - x-oaiExpandable: true - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ListRunsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/RunObject' - first_id: - type: string - example: run_abc123 - last_id: - type: string - example: run_abc456 - has_more: - type: boolean - example: false - ModifyRunRequest: - type: object - properties: - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - SubmitToolOutputsRunRequest: - required: - - tool_outputs - type: object - properties: - tool_outputs: - type: array - items: - type: object - properties: - tool_call_id: - type: string - description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. - output: - type: string - description: The output of the tool call to be submitted to continue the run. - description: A list of tools for which the outputs are being submitted. - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - additionalProperties: false - RunToolCallObject: - required: - - id - - type - - function - type: object - properties: - id: - type: string - description: 'The ID of the tool call. This ID must be referenced when you submit the tool outputs using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint.' - type: - enum: - - function - type: string - description: 'The type of tool call the output is required for. For now, this is always `function`.' - function: - required: - - name - - arguments - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments that the model expects you to pass to the function. - description: The function definition. - description: Tool call objects - CreateThreadAndRunRequest: - required: - - assistant_id - type: object - properties: - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.' - thread: - $ref: '#/components/schemas/CreateThreadRequest' - model: - anyOf: - - type: string - - enum: - - gpt-4o - - gpt-4o-2024-08-06 - - gpt-4o-2024-05-13 - - gpt-4o-mini - - gpt-4o-mini-2024-07-18 - - gpt-4-turbo - - gpt-4-turbo-2024-04-09 - - gpt-4-0125-preview - - gpt-4-turbo-preview - - gpt-4-1106-preview - - gpt-4-vision-preview - - gpt-4 - - gpt-4-0314 - - gpt-4-0613 - - gpt-4-32k - - gpt-4-32k-0314 - - gpt-4-32k-0613 - - gpt-3.5-turbo - - gpt-3.5-turbo-16k - - gpt-3.5-turbo-0613 - - gpt-3.5-turbo-1106 - - gpt-3.5-turbo-0125 - - gpt-3.5-turbo-16k-0613 - type: string - description: 'The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.' - nullable: true - example: gpt-4o - x-oaiTypeLabel: string - instructions: - type: string - description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. - nullable: true - tools: - maxItems: 20 - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearch' - - $ref: '#/components/schemas/AssistantToolsFunction' - description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
- nullable: true - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.\n" - description: "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - temperature: - maximum: 2 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - top_p: - maximum: 1 - minimum: 0 - type: number - description: empty - default: 1 - nullable: true - example: 1 - stream: - type: boolean - description: "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.\n" - nullable: true - max_prompt_tokens: - minimum: 256 - type: integer - description: "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - max_completion_tokens: - minimum: 256 - type: integer - description: "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.\n" - nullable: true - truncation_strategy: - $ref: '#/components/schemas/TruncationObject' - tool_choice: - $ref: '#/components/schemas/AssistantsApiToolChoiceOption' - parallel_tool_calls: - $ref: '#/components/schemas/ParallelToolCalls' - response_format: - $ref: '#/components/schemas/AssistantsApiResponseFormatOption' - additionalProperties: false - ThreadObject: - title: Thread - required: - - id - - object - - created_at - - tool_resources - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - thread - type: string - description: 'The object type, which is always `thread`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the thread was created. 
- tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: 'Represents a thread that contains [messages](/docs/api-reference/messages).' - x-oaiMeta: - name: The thread object - beta: true - example: "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1698107661,\n \"metadata\": {}\n}\n" - CreateThreadRequest: - type: object - properties: - messages: - type: array - items: - $ref: '#/components/schemas/CreateMessageRequest' - description: 'A list of [messages](/docs/api-reference/messages) to start the thread with.' - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - oneOf: - - required: - - vector_store_ids - - required: - - vector_stores - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - vector_stores: - maxItems: 1 - type: array - items: - type: object - properties: - file_ids: - maxItems: 10000 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store.\n" - chunking_strategy: - type: object - oneOf: - - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. 
The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - additionalProperties: false - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - x-oaiTypeLabel: map - x-oaiExpandable: true - description: "A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ModifyThreadRequest: - type: object - properties: - tool_resources: - type: object - properties: - code_interpreter: - type: object - properties: - file_ids: - maxItems: 20 - type: array - items: - type: string - description: "A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.\n" - file_search: - type: object - properties: - vector_store_ids: - maxItems: 1 - type: array - items: - type: string - description: "The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread.\n" - description: "A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.\n" - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - DeleteThreadResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - thread.deleted - type: string - ListThreadsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/ThreadObject' - first_id: - type: string - example: asst_abc123 - last_id: - type: string - example: asst_abc456 - has_more: - type: boolean - example: false - MessageObject: - title: The message object - required: - - id - - object - - created_at - - thread_id - - status - - incomplete_details - - completed_at - - incomplete_at - - role - - content - - assistant_id - - run_id - - attachments - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - thread.message - type: string - description: 'The object type, which is always `thread.message`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was created. - thread_id: - type: string - description: 'The [thread](/docs/api-reference/threads) ID that this message belongs to.' - status: - enum: - - in_progress - - incomplete - - completed - type: string - description: 'The status of the message, which can be either `in_progress`, `incomplete`, or `completed`.' - incomplete_details: - required: - - reason - type: object - properties: - reason: - enum: - - content_filter - - max_tokens - - run_cancelled - - run_expired - - run_failed - type: string - description: The reason the message is incomplete. - description: 'On an incomplete message, details about why the message is incomplete.' - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was completed. - nullable: true - incomplete_at: - type: integer - description: The Unix timestamp (in seconds) for when the message was marked as incomplete. - nullable: true - role: - enum: - - user - - assistant - type: string - description: The entity that produced the message. One of `user` or `assistant`. - content: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentImageUrlObject' - - $ref: '#/components/schemas/MessageContentTextObject' - - $ref: '#/components/schemas/MessageContentRefusalObject' - x-oaiExpandable: true - description: The content of the message in array of text and/or images. - assistant_id: - type: string - description: 'If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message.' - nullable: true - run_id: - type: string - description: 'The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints.' - nullable: true - attachments: - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. 
- tools: - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - x-oaiExpandable: true - description: The tools to add this file to. - description: 'A list of files attached to the message, and the tools they were added to.' - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: 'Represents a message within a [thread](/docs/api-reference/threads).' - x-oaiMeta: - name: The message object - beta: true - example: "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1698983503,\n \"thread_id\": \"thread_abc123\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hi! How can I help you today?\",\n \"annotations\": []\n }\n }\n ],\n \"assistant_id\": \"asst_abc123\",\n \"run_id\": \"run_abc123\",\n \"attachments\": [],\n \"metadata\": {}\n}\n" - MessageDeltaObject: - title: Message delta object - required: - - id - - object - - delta - type: object - properties: - id: - type: string - description: 'The identifier of the message, which can be referenced in API endpoints.' - object: - enum: - - thread.message.delta - type: string - description: 'The object type, which is always `thread.message.delta`.' - delta: - type: object - properties: - role: - enum: - - user - - assistant - type: string - description: The entity that produced the message. One of `user` or `assistant`. - content: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageDeltaContentImageFileObject' - - $ref: '#/components/schemas/MessageDeltaContentTextObject' - - $ref: '#/components/schemas/MessageDeltaContentRefusalObject' - - $ref: '#/components/schemas/MessageDeltaContentImageUrlObject' - x-oaiExpandable: true - description: The content of the message in array of text and/or images. - description: The delta containing the fields that have changed on the Message. - description: "Represents a message delta i.e. any changed fields on a message during streaming.\n" - x-oaiMeta: - name: The message delta object - beta: true - example: "{\n \"id\": \"msg_123\",\n \"object\": \"thread.message.delta\",\n \"delta\": {\n \"content\": [\n {\n \"index\": 0,\n \"type\": \"text\",\n \"text\": { \"value\": \"Hello\", \"annotations\": [] }\n }\n ]\n }\n}\n" - CreateMessageRequest: - required: - - role - - content - type: object - properties: - role: - enum: - - user - - assistant - type: string - description: "The role of the entity that is creating the message. Allowed values include:\n- `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n- `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.\n" - content: - oneOf: - - title: Text content - type: string - description: The text contents of the message. 
- - title: Array of content parts - minItems: 1 - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentImageFileObject' - - $ref: '#/components/schemas/MessageContentImageUrlObject' - - $ref: '#/components/schemas/MessageRequestContentTextObject' - x-oaiExpandable: true - description: 'An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](/docs/models/overview).' - x-oaiExpandable: true - attachments: - required: - - file_id - - tools - type: array - items: - type: object - properties: - file_id: - type: string - description: The ID of the file to attach to the message. - tools: - type: array - items: - oneOf: - - $ref: '#/components/schemas/AssistantToolsCode' - - $ref: '#/components/schemas/AssistantToolsFileSearchTypeOnly' - x-oaiExpandable: true - description: The tools to add this file to. - description: 'A list of files attached to the message, and the tools they should be added to.' - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ModifyMessageRequest: - type: object - properties: - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - DeleteMessageResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - thread.message.deleted - type: string - ListMessagesResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/MessageObject' - first_id: - type: string - example: msg_abc123 - last_id: - type: string - example: msg_abc123 - has_more: - type: boolean - example: false - MessageContentImageFileObject: - title: Image file - required: - - type - - image_file - type: object - properties: - type: - enum: - - image_file - type: string - description: Always `image_file`. - image_file: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: 'References an image [File](/docs/api-reference/files) in the content of a message.' - MessageDeltaContentImageFileObject: - title: Image file - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the content part in the message. - type: - enum: - - image_file - type: string - description: Always `image_file`. 
- image_file: - type: object - properties: - file_id: - type: string - description: 'The [File](/docs/api-reference/files) ID of the image in the message content. Set `purpose="vision"` when uploading the File if you need to later display the file content.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image if specified by the user. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: 'References an image [File](/docs/api-reference/files) in the content of a message.' - MessageContentImageUrlObject: - title: Image URL - required: - - type - - image_url - type: object - properties: - type: - enum: - - image_url - type: string - description: The type of the content part. - image_url: - required: - - url - type: object - properties: - url: - type: string - description: 'The external URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' - format: uri - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`. Default value is `auto`' - default: auto - description: References an image URL in the content of a message. - MessageDeltaContentImageUrlObject: - title: Image URL - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the content part in the message. - type: - enum: - - image_url - type: string - description: Always `image_url`. - image_url: - type: object - properties: - url: - type: string - description: 'The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp.' - detail: - enum: - - auto - - low - - high - type: string - description: 'Specifies the detail level of the image. `low` uses fewer tokens, you can opt in to high resolution using `high`.' - default: auto - description: References an image URL in the content of a message. - MessageContentTextObject: - title: Text - required: - - type - - text - type: object - properties: - type: - enum: - - text - type: string - description: Always `text`. - text: - required: - - value - - annotations - type: object - properties: - value: - type: string - description: The data that makes up the text. - annotations: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - description: The text content that is part of a message. - MessageContentRefusalObject: - title: Refusal - required: - - type - - refusal - type: object - properties: - type: - enum: - - refusal - type: string - description: Always `refusal`. - refusal: - type: string - description: The refusal content generated by the assistant. - MessageRequestContentTextObject: - title: Text - required: - - type - - text - type: object - properties: - type: - enum: - - text - type: string - description: Always `text`. - text: - type: string - description: Text content to be sent to the model - description: The text content that is part of a message. - MessageContentTextAnnotationsFileCitationObject: - title: File citation - required: - - type - - text - - file_citation - - start_index - - end_index - type: object - properties: - type: - enum: - - file_citation - type: string - description: Always `file_citation`. 
- text: - type: string - description: The text in the message content that needs to be replaced. - file_citation: - required: - - file_id - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - MessageContentTextAnnotationsFilePathObject: - title: File path - required: - - type - - text - - file_path - - start_index - - end_index - type: object - properties: - type: - enum: - - file_path - type: string - description: Always `file_path`. - text: - type: string - description: The text in the message content that needs to be replaced. - file_path: - required: - - file_id - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - MessageDeltaContentTextObject: - title: Text - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the content part in the message. - type: - enum: - - text - type: string - description: Always `text`. - text: - type: object - properties: - value: - type: string - description: The data that makes up the text. - annotations: - type: array - items: - oneOf: - - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject' - - $ref: '#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject' - x-oaiExpandable: true - description: The text content that is part of a message. - MessageDeltaContentRefusalObject: - title: Refusal - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the refusal part in the message. - type: - enum: - - refusal - type: string - description: Always `refusal`. - refusal: - type: string - description: The refusal content that is part of a message. - MessageDeltaContentTextAnnotationsFileCitationObject: - title: File citation - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: - enum: - - file_citation - type: string - description: Always `file_citation`. - text: - type: string - description: The text in the message content that needs to be replaced. - file_citation: - type: object - properties: - file_id: - type: string - description: The ID of the specific File the citation is from. - quote: - type: string - description: The specific quote in the file. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. - MessageDeltaContentTextAnnotationsFilePathObject: - title: File path - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the annotation in the text content part. - type: - enum: - - file_path - type: string - description: Always `file_path`. 
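# The file_citation and file_path annotation schemas above mark spans of message text
# (`text`, `start_index`, `end_index`) that callers are expected to swap for footnotes or
# download links. A rough sketch of resolving them with the Python SDK; the thread and
# message IDs are placeholders:
#
#   from openai import OpenAI
#   client = OpenAI()
#
#   message = client.beta.threads.messages.retrieve(
#       message_id="msg_abc123", thread_id="thread_abc123"
#   )
#   for part in message.content:
#       if part.type != "text":
#           continue
#       value, citations = part.text.value, []
#       for i, ann in enumerate(part.text.annotations):
#           value = value.replace(ann.text, f" [{i}]")   # replace the annotated span
#           if getattr(ann, "file_citation", None):
#               citations.append(f"[{i}] {ann.file_citation.file_id}")
#       print(value)
#       print("\n".join(citations))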
- text: - type: string - description: The text in the message content that needs to be replaced. - file_path: - type: object - properties: - file_id: - type: string - description: The ID of the file that was generated. - start_index: - minimum: 0 - type: integer - end_index: - minimum: 0 - type: integer - description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. - RunStepObject: - title: Run steps - required: - - id - - object - - created_at - - assistant_id - - thread_id - - run_id - - type - - status - - step_details - - last_error - - expired_at - - cancelled_at - - failed_at - - completed_at - - metadata - - usage - type: object - properties: - id: - type: string - description: 'The identifier of the run step, which can be referenced in API endpoints.' - object: - enum: - - thread.run.step - type: string - description: 'The object type, which is always `thread.run.step`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step was created. - assistant_id: - type: string - description: 'The ID of the [assistant](/docs/api-reference/assistants) associated with the run step.' - thread_id: - type: string - description: 'The ID of the [thread](/docs/api-reference/threads) that was run.' - run_id: - type: string - description: 'The ID of the [run](/docs/api-reference/runs) that this run step is a part of.' - type: - enum: - - message_creation - - tool_calls - type: string - description: 'The type of run step, which can be either `message_creation` or `tool_calls`.' - status: - enum: - - in_progress - - cancelled - - failed - - completed - - expired - type: string - description: 'The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.' - step_details: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsObject' - description: The details of the run step. - x-oaiExpandable: true - last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - rate_limit_exceeded - type: string - description: One of `server_error` or `rate_limit_exceeded`. - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this run step. Will be `null` if there are no errors. - nullable: true - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. - nullable: true - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step was cancelled. - nullable: true - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step failed. - nullable: true - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the run step completed. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - usage: - $ref: '#/components/schemas/RunStepCompletionUsage' - description: "Represents a step in execution of a run.\n" - x-oaiMeta: - name: The run step object - beta: true - example: "{\n \"id\": \"step_abc123\",\n \"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" - RunStepDeltaObject: - title: Run step delta object - required: - - id - - object - - delta - type: object - properties: - id: - type: string - description: 'The identifier of the run step, which can be referenced in API endpoints.' - object: - enum: - - thread.run.step.delta - type: string - description: 'The object type, which is always `thread.run.step.delta`.' - delta: - type: object - properties: - step_details: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsObject' - description: The details of the run step. - x-oaiExpandable: true - description: The delta containing the fields that have changed on the run step. - description: "Represents a run step delta i.e. any changed fields on a run step during streaming.\n" - x-oaiMeta: - name: The run step delta object - beta: true - example: "{\n \"id\": \"step_123\",\n \"object\": \"thread.run.step.delta\",\n \"delta\": {\n \"step_details\": {\n \"type\": \"tool_calls\",\n \"tool_calls\": [\n {\n \"index\": 0,\n \"id\": \"call_123\",\n \"type\": \"code_interpreter\",\n \"code_interpreter\": { \"input\": \"\", \"outputs\": [] }\n }\n ]\n }\n }\n}\n" - ListRunStepsResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/RunStepObject' - first_id: - type: string - example: step_abc123 - last_id: - type: string - example: step_abc456 - has_more: - type: boolean - example: false - RunStepDetailsMessageCreationObject: - title: Message creation - required: - - type - - message_creation - type: object - properties: - type: - enum: - - message_creation - type: string - description: Always `message_creation`. - message_creation: - required: - - message_id - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - description: Details of the message creation by the run step. - RunStepDeltaStepDetailsMessageCreationObject: - title: Message creation - required: - - type - type: object - properties: - type: - enum: - - message_creation - type: string - description: Always `message_creation`. - message_creation: - type: object - properties: - message_id: - type: string - description: The ID of the message that was created by this run step. - description: Details of the message creation by the run step. 
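# A short sketch of walking the run step objects described above once a run has finished,
# using the Python SDK; the thread and run IDs are placeholders:
#
#   from openai import OpenAI
#   client = OpenAI()
#
#   steps = client.beta.threads.runs.steps.list(
#       thread_id="thread_abc123", run_id="run_abc123"
#   )
#   for step in steps:
#       if step.type == "message_creation":
#           print("created message:", step.step_details.message_creation.message_id)
#       elif step.type == "tool_calls":
#           for call in step.step_details.tool_calls:
#               print("tool call:", call.type)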
- RunStepDetailsToolCallsObject: - title: Tool calls - required: - - type - - tool_calls - type: object - properties: - type: - enum: - - tool_calls - type: string - description: Always `tool_calls`. - tool_calls: - type: array - items: - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFileSearchObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" - description: Details of the tool call. - RunStepDeltaStepDetailsToolCallsObject: - title: Tool calls - required: - - type - type: object - properties: - type: - enum: - - tool_calls - type: string - description: Always `tool_calls`. - tool_calls: - type: array - items: - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject' - x-oaiExpandable: true - description: "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`.\n" - description: Details of the tool call. - RunStepDetailsToolCallsCodeObject: - title: Code Interpreter tool call - required: - - id - - type - - code_interpreter - type: object - properties: - id: - type: string - description: The ID of the tool call. - type: - enum: - - code_interpreter - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - code_interpreter: - required: - - input - - outputs - type: object - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - items: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - description: 'The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. - RunStepDeltaStepDetailsToolCallsCodeObject: - title: Code interpreter tool call - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call. - type: - enum: - - code_interpreter - type: string - description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. - code_interpreter: - type: object - properties: - input: - type: string - description: The input to the Code Interpreter tool call. - outputs: - type: array - items: - type: object - oneOf: - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject' - - $ref: '#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject' - x-oaiExpandable: true - description: 'The outputs from the Code Interpreter tool call. 
Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type.' - description: The Code Interpreter tool call definition. - description: Details of the Code Interpreter tool call the run step was involved in. - RunStepDetailsToolCallsCodeOutputLogsObject: - title: Code Interpreter log output - required: - - type - - logs - type: object - properties: - type: - enum: - - logs - type: string - description: Always `logs`. - logs: - type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. - RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: - title: Code interpreter log output - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - enum: - - logs - type: string - description: Always `logs`. - logs: - type: string - description: The text output from the Code Interpreter tool call. - description: Text output from the Code Interpreter tool call as part of a run step. - RunStepDetailsToolCallsCodeOutputImageObject: - title: Code Interpreter image output - required: - - type - - image - type: object - properties: - type: - enum: - - image - type: string - description: Always `image`. - image: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'The [file](/docs/api-reference/files) ID of the image.' - RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: - title: Code interpreter image output - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the output in the outputs array. - type: - enum: - - image - type: string - description: Always `image`. - image: - type: object - properties: - file_id: - type: string - description: 'The [file](/docs/api-reference/files) ID of the image.' - RunStepDetailsToolCallsFileSearchObject: - title: File search tool call - required: - - id - - type - - file_search - type: object - properties: - id: - type: string - description: The ID of the tool call object. - type: - enum: - - file_search - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - file_search: - type: object - description: 'For now, this is always going to be an empty object.' - x-oaiTypeLabel: map - RunStepDeltaStepDetailsToolCallsFileSearchObject: - title: File search tool call - required: - - index - - type - - file_search - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - enum: - - file_search - type: string - description: The type of tool call. This is always going to be `file_search` for this type of tool call. - file_search: - type: object - description: 'For now, this is always going to be an empty object.' - x-oaiTypeLabel: map - RunStepDetailsToolCallsFunctionObject: - title: Function tool call - required: - - id - - type - - function - type: object - properties: - id: - type: string - description: The ID of the tool call object. - type: - enum: - - function - type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. 
- function: - required: - - name - - arguments - - output - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' - nullable: true - description: The definition of the function that was called. - RunStepDeltaStepDetailsToolCallsFunctionObject: - title: Function tool call - required: - - index - - type - type: object - properties: - index: - type: integer - description: The index of the tool call in the tool calls array. - id: - type: string - description: The ID of the tool call object. - type: - enum: - - function - type: string - description: The type of tool call. This is always going to be `function` for this type of tool call. - function: - type: object - properties: - name: - type: string - description: The name of the function. - arguments: - type: string - description: The arguments passed to the function. - output: - type: string - description: 'The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet.' - nullable: true - description: The definition of the function that was called. - VectorStoreExpirationAfter: - title: Vector store expiration policy - required: - - anchor - - days - type: object - properties: - anchor: - enum: - - last_active_at - type: string - description: 'Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.' - days: - maximum: 365 - minimum: 1 - type: integer - description: The number of days after the anchor time that the vector store will expire. - description: The expiration policy for a vector store. - VectorStoreObject: - title: Vector store - required: - - id - - object - - usage_bytes - - created_at - - status - - last_active_at - - name - - file_counts - - metadata - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store - type: string - description: 'The object type, which is always `vector_store`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store was created. - name: - type: string - description: The name of the vector store. - usage_bytes: - type: integer - description: The total number of bytes used by the files in the vector store. - file_counts: - required: - - in_progress - - completed - - failed - - cancelled - - total - type: object - properties: - in_progress: - type: integer - description: The number of files that are currently being processed. - completed: - type: integer - description: The number of files that have been successfully processed. - failed: - type: integer - description: The number of files that have failed to process. - cancelled: - type: integer - description: The number of files that were cancelled. - total: - type: integer - description: The total number of files. - status: - enum: - - expired - - in_progress - - completed - type: string - description: 'The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.' 
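# Per the function tool call schemas above, `output` stays `null` until the caller submits
# it. A minimal sketch of answering a `requires_action` run with the Python SDK; the IDs
# and the output string are placeholders:
#
#   from openai import OpenAI
#   client = OpenAI()
#
#   run = client.beta.threads.runs.submit_tool_outputs(
#       thread_id="thread_abc123",
#       run_id="run_abc123",
#       tool_outputs=[
#           {"tool_call_id": "call_abc123", "output": "72 degrees and sunny"},
#       ],
#   )
#   print(run.status)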
- expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store will expire. - nullable: true - last_active_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store was last active. - nullable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - description: A vector store is a collection of processed files can be used by the `file_search` tool. - x-oaiMeta: - name: The vector store object - beta: true - example: "{\n \"id\": \"vs_123\",\n \"object\": \"vector_store\",\n \"created_at\": 1698107661,\n \"usage_bytes\": 123456,\n \"last_active_at\": 1698107661,\n \"name\": \"my_vector_store\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"cancelled\": 0,\n \"failed\": 0,\n \"total\": 100\n },\n \"metadata\": {},\n \"last_used_at\": 1698107661\n}\n" - CreateVectorStoreRequest: - type: object - properties: - file_ids: - maxItems: 500 - type: array - items: - type: string - description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' - name: - type: string - description: The name of the vector store. - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' - chunking_strategy: - type: object - oneOf: - - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam' - - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam' - description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty.' - x-oaiExpandable: true - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - UpdateVectorStoreRequest: - type: object - properties: - name: - type: string - description: The name of the vector store. - nullable: true - expires_after: - $ref: '#/components/schemas/VectorStoreExpirationAfter' + - url: https://api.openai.com/v1 +tags: + - name: Assistants + description: Build Assistants that can call models and use tools. + - name: Audio + description: Turn audio into text or text into audio. + - name: Chat + description: Given a list of messages comprising a conversation, the model will + return a response. + - name: Completions + description: Given a prompt, the model will return one or more predicted + completions, and can also return the probabilities of alternative tokens + at each position. + - name: Embeddings + description: Get a vector representation of a given input that can be easily + consumed by machine learning models and algorithms. + - name: Fine-tuning + description: Manage fine-tuning jobs to tailor a model to your specific training data. + - name: Batch + description: Create large batches of API requests to run asynchronously. 
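# Pulling the vector store schemas above together, a sketch of creating a store with an
# expiration policy and a static chunking strategy (Python SDK; vector stores sit under
# the `beta` namespace in SDK versions contemporary with this spec). The name and file ID
# are placeholders:
#
#   from openai import OpenAI
#   client = OpenAI()
#
#   vector_store = client.beta.vector_stores.create(
#       name="Support FAQ",
#       file_ids=["file-abc123"],
#       expires_after={"anchor": "last_active_at", "days": 7},
#       chunking_strategy={
#           "type": "static",
#           "static": {"max_chunk_size_tokens": 800, "chunk_overlap_tokens": 400},
#       },
#   )
#   print(vector_store.status)   # "in_progress" until the files are processed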
+ - name: Files + description: Files are used to upload documents that can be used with features + like Assistants and Fine-tuning. + - name: Uploads + description: Use Uploads to upload large files in multiple parts. + - name: Images + description: Given a prompt and/or an input image, the model will generate a new image. + - name: Models + description: List and describe the various models available in the API. + - name: Moderations + description: Given text and/or image inputs, classifies if those inputs are + potentially harmful. + - name: Audit Logs + description: List user actions and configuration changes within this organization. +paths: + /assistants: + get: + operationId: listAssistants + tags: + - Assistants + summary: Returns a list of assistants. + parameters: + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListAssistantsResponse" + x-oaiMeta: + name: List assistants + group: assistants + beta: true + returns: A list of [assistant](/docs/api-reference/assistants/object) objects. 
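# The limit/order/after/before parameters above implement cursor pagination. Iterating the
# Python SDK's list result follows the `after` cursor automatically; a brief sketch:
#
#   from openai import OpenAI
#   client = OpenAI()
#
#   for assistant in client.beta.assistants.list(order="desc", limit=20):
#       print(assistant.id, assistant.name)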
+ examples: + request: + curl: | + curl "https://api.openai.com/v1/assistants?order=desc&limit=20" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_assistants = client.beta.assistants.list( + order="desc", + limit="20", + ) + print(my_assistants.data) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistants = await openai.beta.assistants.list({ + order: "desc", + limit: "20", + }); + + console.log(myAssistants.data); + } + + main(); + response: > + { + "object": "list", + "data": [ + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698982736, + "name": "Coding Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc456", + "object": "assistant", + "created_at": 1698982718, + "name": "My Assistant", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc789", + "object": "assistant", + "created_at": 1698982643, + "name": null, + "description": null, + "model": "gpt-4o", + "instructions": null, + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + ], + "first_id": "asst_abc123", + "last_id": "asst_abc789", + "has_more": false + } + post: + operationId: createAssistant + tags: + - Assistants + summary: Create an assistant with a model and instructions. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateAssistantRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Create assistant + group: assistants + beta: true + returns: An [assistant](/docs/api-reference/assistants/object) object. + examples: + - title: Code Interpreter + request: + curl: > + curl "https://api.openai.com/v1/assistants" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + "name": "Math Tutor", + "tools": [{"type": "code_interpreter"}], + "model": "gpt-4o" + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + my_assistant = client.beta.assistants.create( + instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + name="Math Tutor", + tools=[{"type": "code_interpreter"}], + model="gpt-4o", + ) + + print(my_assistant) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", + name: "Math Tutor", + tools: [{ type: "code_interpreter" }], + model: "gpt-4o", + }); + + console.log(myAssistant); + } + + + main(); + response: > + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698984975, + "name": "Math Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + - title: Files + request: + curl: > + curl https://api.openai.com/v1/assistants \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [{"type": "file_search"}], + "tool_resources": {"file_search": {"vector_store_ids": ["vs_123"]}}, + "model": "gpt-4o" + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + my_assistant = client.beta.assistants.create( + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", + name="HR Helper", + tools=[{"type": "file_search"}], + tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}}, + model="gpt-4o" + ) + + print(my_assistant) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies.", + name: "HR Helper", + tools: [{ type: "file_search" }], + tool_resources: { + file_search: { + vector_store_ids: ["vs_123"] + } + }, + model: "gpt-4o" + }); + + console.log(myAssistant); + } + + + main(); + response: > + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009403, + "name": "HR Helper", + "description": null, + "model": "gpt-4o", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": ["vs_123"] + } + }, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + /assistants/{assistant_id}: + get: + operationId: getAssistant + tags: + - Assistants + summary: Retrieves an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Retrieve assistant + group: assistants + beta: true + returns: The [assistant](/docs/api-reference/assistants/object) object matching + the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.retrieve("asst_abc123") + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.retrieve( + "asst_abc123" + ); + + console.log(myAssistant); + } + + main(); + response: > + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4o", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ + { + "type": "file_search" + } + ], + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + post: + operationId: modifyAssistant + tags: + - Assistants + summary: Modifies an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyAssistantRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Modify assistant + group: assistants + beta: true + returns: The modified [assistant](/docs/api-reference/assistants/object) object. + examples: + request: + curl: > + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + "tools": [{"type": "file_search"}], + "model": "gpt-4o" + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + my_updated_assistant = client.beta.assistants.update( + "asst_abc123", + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + name="HR Helper", + tools=[{"type": "file_search"}], + model="gpt-4o" + ) + + + print(my_updated_assistant) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const myUpdatedAssistant = await openai.beta.assistants.update( + "asst_abc123", + { + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + name: "HR Helper", + tools: [{ type: "file_search" }], + model: "gpt-4o" + } + ); + + console.log(myUpdatedAssistant); + } + + + main(); + response: > + { + "id": "asst_123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4o", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": [] + } + }, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + delete: + operationId: deleteAssistant + tags: + - Assistants + summary: Delete an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteAssistantResponse" + x-oaiMeta: + name: Delete assistant + group: assistants + beta: true + returns: Deletion status + examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X DELETE + python: | + from openai import OpenAI + client = OpenAI() + + response = client.beta.assistants.delete("asst_abc123") + print(response) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const response = await openai.beta.assistants.del("asst_abc123"); + + console.log(response); + } + + main(); + response: | + { + "id": "asst_abc123", + "object": "assistant.deleted", + "deleted": true + } + /audio/speech: + post: + operationId: createSpeech + tags: + - Audio + summary: Generates audio from the input text. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateSpeechRequest" + responses: + "200": + description: OK + headers: + Transfer-Encoding: + schema: + type: string + description: chunked + content: + application/octet-stream: + schema: + type: string + format: binary + x-oaiMeta: + name: Create speech + group: audio + returns: The audio file content. + examples: + request: + curl: | + curl https://api.openai.com/v1/audio/speech \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "model": "tts-1", + "input": "The quick brown fox jumped over the lazy dog.", + "voice": "alloy" + }' \ + --output speech.mp3 + python: | + from pathlib import Path + import openai + + speech_file_path = Path(__file__).parent / "speech.mp3" + response = openai.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." + ) + response.stream_to_file(speech_file_path) + node: > + import fs from "fs"; + + import path from "path"; + + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const speechFile = path.resolve("./speech.mp3"); + + + async function main() { + const mp3 = await openai.audio.speech.create({ + model: "tts-1", + voice: "alloy", + input: "Today is a wonderful day to build something people love!", + }); + console.log(speechFile); + const buffer = Buffer.from(await mp3.arrayBuffer()); + await fs.promises.writeFile(speechFile, buffer); + } + + main(); + /audio/transcriptions: + post: + operationId: createTranscription + tags: + - Audio + summary: Transcribes audio into the input language. 
+ requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranscriptionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/CreateTranscriptionResponseJson" + - $ref: "#/components/schemas/CreateTranscriptionResponseVerboseJson" + x-oaiMeta: + name: Create transcription + group: audio + returns: The [transcription object](/docs/api-reference/audio/json-object) or a + [verbose transcription + object](/docs/api-reference/audio/verbose-json-object). + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + node: > + import fs from "fs"; + + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + }); + + console.log(transcription.text); + } + + main(); + response: > + { + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." + } + - title: Word timestamps + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F "timestamp_granularities[]=word" \ + -F model="whisper-1" \ + -F response_format="verbose_json" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + file=audio_file, + model="whisper-1", + response_format="verbose_json", + timestamp_granularities=["word"] + ) + + print(transcript.words) + node: > + import fs from "fs"; + + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + response_format: "verbose_json", + timestamp_granularities: ["word"] + }); + + console.log(transcription.text); + } + + main(); + response: > + { + "task": "transcribe", + "language": "english", + "duration": 8.470000267028809, + "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.", + "words": [ + { + "word": "The", + "start": 0.0, + "end": 0.23999999463558197 + }, + ... 
+ { + "word": "volleyball", + "start": 7.400000095367432, + "end": 7.900000095367432 + } + ] + } + - title: Segment timestamps + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F "timestamp_granularities[]=segment" \ + -F model="whisper-1" \ + -F response_format="verbose_json" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + file=audio_file, + model="whisper-1", + response_format="verbose_json", + timestamp_granularities=["segment"] + ) + + print(transcript.words) + node: > + import fs from "fs"; + + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + response_format: "verbose_json", + timestamp_granularities: ["segment"] + }); + + console.log(transcription.text); + } + + main(); + response: > + { + "task": "transcribe", + "language": "english", + "duration": 8.470000267028809, + "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.", + "segments": [ + { + "id": 0, + "seek": 0, + "start": 0.0, + "end": 3.319999933242798, + "text": " The beach was a popular spot on a hot summer day.", + "tokens": [ + 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530 + ], + "temperature": 0.0, + "avg_logprob": -0.2860786020755768, + "compression_ratio": 1.2363636493682861, + "no_speech_prob": 0.00985979475080967 + }, + ... + ] + } + /audio/translations: + post: + operationId: createTranslation + tags: + - Audio + summary: Translates audio into English. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranslationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/CreateTranslationResponseJson" + - $ref: "#/components/schemas/CreateTranslationResponseVerboseJson" + x-oaiMeta: + name: Create translation + group: audio + returns: The translated text. + examples: + request: + curl: | + curl https://api.openai.com/v1/audio/translations \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/german.m4a" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.translations.create( + model="whisper-1", + file=audio_file + ) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const translation = await openai.audio.translations.create({ + file: fs.createReadStream("speech.mp3"), + model: "whisper-1", + }); + + console.log(translation.text); + } + main(); + response: > + { + "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?" 
+ } + /batches: + post: + summary: Creates and executes a batch from an uploaded file of requests + operationId: createBatch + tags: + - Batch + requestBody: + required: true + content: + application/json: + schema: + type: object + required: + - input_file_id + - endpoint + - completion_window + properties: + input_file_id: + type: string + description: > + The ID of an uploaded file that contains requests for the + new batch. + + + See [upload file](/docs/api-reference/files/create) for how + to upload a file. + + + Your input file must be formatted as a [JSONL + file](/docs/api-reference/batch/request-input), and must be + uploaded with the purpose `batch`. The file can contain up + to 50,000 requests, and can be up to 200 MB in size. + endpoint: + type: string + enum: + - /v1/chat/completions + - /v1/embeddings + - /v1/completions + description: The endpoint to be used for all requests in the batch. Currently + `/v1/chat/completions`, `/v1/embeddings`, and + `/v1/completions` are supported. Note that `/v1/embeddings` + batches are also restricted to a maximum of 50,000 embedding + inputs across all requests in the batch. + completion_window: + type: string + enum: + - 24h + description: The time frame within which the batch should be processed. + Currently only `24h` is supported. metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - additionalProperties: false - ListVectorStoresResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreObject' - first_id: - type: string - example: vs_abc123 - last_id: - type: string - example: vs_abc456 - has_more: - type: boolean - example: false - DeleteVectorStoreResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - vector_store.deleted - type: string - VectorStoreFileObject: - title: Vector store files - required: - - id - - object - - usage_bytes - - created_at - - vector_store_id - - status - - last_error - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store.file - type: string - description: 'The object type, which is always `vector_store.file`.' - usage_bytes: - type: integer - description: The total vector store usage in bytes. Note that this may be different from the original file size. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store file was created. - vector_store_id: - type: string - description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' - status: - enum: - - in_progress - - completed - - cancelled - - failed - type: string - description: 'The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use.' 
- last_error: - required: - - code - - message - type: object - properties: - code: - enum: - - server_error - - unsupported_file - - invalid_file - type: string - description: One of `server_error`, `unsupported_file`, or `invalid_file`. - message: - type: string - description: A human-readable description of the error. - description: The last error associated with this vector store file. Will be `null` if there are no errors. - nullable: true - chunking_strategy: - type: object - oneOf: - - $ref: '#/components/schemas/StaticChunkingStrategyResponseParam' - - $ref: '#/components/schemas/OtherChunkingStrategyResponseParam' - description: The strategy used to chunk the file. - x-oaiExpandable: true - description: A list of files attached to a vector store. - x-oaiMeta: - name: The vector store file object - beta: true - example: "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"usage_bytes\": 1234,\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"last_error\": null,\n \"chunking_strategy\": {\n \"type\": \"static\",\n \"static\": {\n \"max_chunk_size_tokens\": 800,\n \"chunk_overlap_tokens\": 400\n }\n }\n}\n" - OtherChunkingStrategyResponseParam: - title: Other Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - other - type: string - description: Always `other`. - additionalProperties: false - description: 'This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.' - StaticChunkingStrategyResponseParam: - title: Static Chunking Strategy - required: - - type - - static - type: object - properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - $ref: '#/components/schemas/StaticChunkingStrategy' - additionalProperties: false - StaticChunkingStrategy: - required: - - max_chunk_size_tokens - - chunk_overlap_tokens - type: object - properties: - max_chunk_size_tokens: - maximum: 4096 - minimum: 100 - type: integer - description: The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - chunk_overlap_tokens: - type: integer - description: "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`.\n" - additionalProperties: false - AutoChunkingStrategyRequestParam: - title: Auto Chunking Strategy - required: - - type - type: object - properties: - type: - enum: - - auto - type: string - description: Always `auto`. - additionalProperties: false - description: The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. - StaticChunkingStrategyRequestParam: - title: Static Chunking Strategy - required: - - type - - static + type: object + additionalProperties: + type: string + description: Optional custom metadata for the batch. + nullable: true + responses: + "200": + description: Batch created successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Batch" + x-oaiMeta: + name: Create batch + group: batch + returns: The created [Batch](/docs/api-reference/batch/object) object.
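# The createBatch request above expects a JSONL file uploaded with purpose `batch`, where
# each line is one request carrying a caller-chosen `custom_id`. A sketch of the full flow
# in Python; the file name and request body are placeholders:
#
#   import json
#   from openai import OpenAI
#   client = OpenAI()
#
#   with open("batchinput.jsonl", "w") as f:
#       f.write(json.dumps({
#           "custom_id": "request-1",
#           "method": "POST",
#           "url": "/v1/chat/completions",
#           "body": {"model": "gpt-4o-mini",
#                    "messages": [{"role": "user", "content": "Hello!"}]},
#       }) + "\n")
#
#   batch_file = client.files.create(file=open("batchinput.jsonl", "rb"), purpose="batch")
#   batch = client.batches.create(
#       input_file_id=batch_file.id,
#       endpoint="/v1/chat/completions",
#       completion_window="24h",
#   )
#   print(batch.id, batch.status)   # a new batch starts in "validating"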
+ examples: + request: + curl: | + curl https://api.openai.com/v1/batches \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "input_file_id": "file-abc123", + "endpoint": "/v1/chat/completions", + "completion_window": "24h" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.batches.create( + input_file_id="file-abc123", + endpoint="/v1/chat/completions", + completion_window="24h" + ) + node: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const batch = await openai.batches.create({ + input_file_id: "file-abc123", + endpoint: "/v1/chat/completions", + completion_window: "24h" + }); + + console.log(batch); + } + + main(); + response: | + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/chat/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "validating", + "output_file_id": null, + "error_file_id": null, + "created_at": 1711471533, + "in_progress_at": null, + "expires_at": null, + "finalizing_at": null, + "completed_at": null, + "failed_at": null, + "expired_at": null, + "cancelling_at": null, + "cancelled_at": null, + "request_counts": { + "total": 0, + "completed": 0, + "failed": 0 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly eval job", + } + } + get: + operationId: listBatches + tags: + - Batch + summary: List your organization's batches. + parameters: + - in: query + name: after + required: false + schema: + type: string + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: Batch listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ListBatchesResponse" + x-oaiMeta: + name: List batch + group: batch + returns: A list of paginated [Batch](/docs/api-reference/batch/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/batches?limit=2 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" + python: | + from openai import OpenAI + client = OpenAI() + + client.batches.list() + node: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.batches.list(); + + for await (const batch of list) { + console.log(batch); + } + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/chat/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "completed", + "output_file_id": "file-cvaTdG", + "error_file_id": "file-HOWS94", + "created_at": 1711471533, + "in_progress_at": 1711471538, + "expires_at": 1711557933, + "finalizing_at": 1711493133, + "completed_at": 1711493163, + "failed_at": null, + "expired_at": null, + "cancelling_at": null, + "cancelled_at": null, + "request_counts": { + "total": 100, + "completed": 95, + "failed": 5 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly job", + } + }, + { ... }, + ], + "first_id": "batch_abc123", + "last_id": "batch_abc456", + "has_more": true + } + /batches/{batch_id}: + get: + operationId: retrieveBatch + tags: + - Batch + summary: Retrieves a batch. + parameters: + - in: path + name: batch_id + required: true + schema: + type: string + description: The ID of the batch to retrieve. + responses: + "200": + description: Batch retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Batch" + x-oaiMeta: + name: Retrieve batch + group: batch + returns: The [Batch](/docs/api-reference/batch/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/batches/batch_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + python: | + from openai import OpenAI + client = OpenAI() + + client.batches.retrieve("batch_abc123") + node: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const batch = await openai.batches.retrieve("batch_abc123"); + + console.log(batch); + } + + main(); + response: | + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "completed", + "output_file_id": "file-cvaTdG", + "error_file_id": "file-HOWS94", + "created_at": 1711471533, + "in_progress_at": 1711471538, + "expires_at": 1711557933, + "finalizing_at": 1711493133, + "completed_at": 1711493163, + "failed_at": null, + "expired_at": null, + "cancelling_at": null, + "cancelled_at": null, + "request_counts": { + "total": 100, + "completed": 95, + "failed": 5 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly eval job", + } + } + /batches/{batch_id}/cancel: + post: + operationId: cancelBatch + tags: + - Batch + summary: Cancels an in-progress batch. The batch will be in status `cancelling` + for up to 10 minutes, before changing to `cancelled`, where it will have + partial results (if any) available in the output file. + parameters: + - in: path + name: batch_id + required: true + schema: + type: string + description: The ID of the batch to cancel. + responses: + "200": + description: Batch is cancelling. Returns the cancelling batch's details. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/Batch" + x-oaiMeta: + name: Cancel batch + group: batch + returns: The [Batch](/docs/api-reference/batch/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/batches/batch_abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -X POST + python: | + from openai import OpenAI + client = OpenAI() + + client.batches.cancel("batch_abc123") + node: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const batch = await openai.batches.cancel("batch_abc123"); + + console.log(batch); + } + + main(); + response: | + { + "id": "batch_abc123", + "object": "batch", + "endpoint": "/v1/chat/completions", + "errors": null, + "input_file_id": "file-abc123", + "completion_window": "24h", + "status": "cancelling", + "output_file_id": null, + "error_file_id": null, + "created_at": 1711471533, + "in_progress_at": 1711471538, + "expires_at": 1711557933, + "finalizing_at": null, + "completed_at": null, + "failed_at": null, + "expired_at": null, + "cancelling_at": 1711475133, + "cancelled_at": null, + "request_counts": { + "total": 100, + "completed": 23, + "failed": 1 + }, + "metadata": { + "customer_id": "user_123456789", + "batch_description": "Nightly eval job", + } + } + /chat/completions: + post: + operationId: createChatCompletion + tags: + - Chat + summary: > + Creates a model response for the given chat conversation. Learn more in + the + + [text generation](/docs/guides/text-generation), + [vision](/docs/guides/vision), + + and [audio](/docs/guides/audio) guides. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateChatCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateChatCompletionResponse" + x-oaiMeta: + name: Create chat completion + group: chat + returns: > + Returns a [chat completion](/docs/api-reference/chat/object) object, + or a streamed sequence of [chat completion + chunk](/docs/api-reference/chat/streaming) objects if the request is + streamed. + path: create + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_chat_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" + } + ] + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + completion = client.chat.completions.create( + model="VAR_chat_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ] + ) + + + print(completion.choices[0].message) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "system", content: "You are a helpful assistant." 
}], + model: "VAR_chat_model_id", + }); + + console.log(completion.choices[0]); + } + + + main(); + response: | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?", + }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + - title: Image input + request: + curl: > + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What'\''s in this image?" + }, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + } + ] + } + ], + "max_tokens": 300 + }' + python: > + from openai import OpenAI + + + client = OpenAI() + + + response = client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + } + }, + ], + } + ], + max_tokens=300, + ) + + + print(response.choices[0]) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const response = await openai.chat.completions.create({ + model: "gpt-4o", + messages: [ + { + role: "user", + content: [ + { type: "text", text: "What's in this image?" }, + { + type: "image_url", + image_url: { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + } + ], + }, + ], + }); + console.log(response.choices[0]); + } + + main(); + response: > + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nThis image shows a wooden boardwalk extending through a lush green marshland.", + }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_chat_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" 
+ } + ], + "stream": true + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + completion = client.chat.completions.create( + model="VAR_chat_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream=True + ) + + + for chunk in completion: + print(chunk.choices[0].delta) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const completion = await openai.chat.completions.create({ + model: "VAR_chat_model_id", + messages: [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream: true, + }); + + for await (const chunk of completion) { + console.log(chunk.choices[0].delta.content); + } + } + + + main(); + response: > + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + + + .... + + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + - title: Functions + request: + curl: > + curl https://api.openai.com/v1/chat/completions \ + + -H "Content-Type: application/json" \ + + -H "Authorization: Bearer $OPENAI_API_KEY" \ + + -d '{ + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "What'\''s the weather like in Boston today?" + } + ], + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "tool_choice": "auto" + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + messages = [{"role": "user", "content": "What's the weather like + in Boston today?"}] + + completion = client.chat.completions.create( + model="VAR_chat_model_id", + messages=messages, + tools=tools, + tool_choice="auto" + ) + + + print(completion) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + const response = await openai.chat.completions.create({ + model: "gpt-4o", + messages: messages, + tools: tools, + tool_choice: "auto", + }); + + console.log(response); + } + + + main(); + response: | + { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1699896916, + "model": "gpt-4o-mini", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\n\"location\": \"Boston, MA\"\n}" + } + } + ] + }, + "logprobs": null, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 82, + "completion_tokens": 17, + "total_tokens": 99, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + - title: Logprobs + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_chat_model_id", + "messages": [ + { + "role": "user", + "content": "Hello!" + } + ], + "logprobs": true, + "top_logprobs": 2 + }' + python: | + from openai import OpenAI + client = OpenAI() + + completion = client.chat.completions.create( + model="VAR_chat_model_id", + messages=[ + {"role": "user", "content": "Hello!"} + ], + logprobs=True, + top_logprobs=2 + ) + + print(completion.choices[0].message) + print(completion.choices[0].logprobs) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "user", content: "Hello!" }], + model: "VAR_chat_model_id", + logprobs: true, + top_logprobs: 2, + }); + + console.log(completion.choices[0]); + } + + main(); + response: | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1702685778, + "model": "gpt-4o-mini", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello! How can I assist you today?" 
+ }, + "logprobs": { + "content": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111], + "top_logprobs": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111] + }, + { + "token": "Hi", + "logprob": -1.3190403, + "bytes": [72, 105] + } + ] + }, + { + "token": "!", + "logprob": -0.02380986, + "bytes": [ + 33 + ], + "top_logprobs": [ + { + "token": "!", + "logprob": -0.02380986, + "bytes": [33] + }, + { + "token": " there", + "logprob": -3.787621, + "bytes": [32, 116, 104, 101, 114, 101] + } + ] + }, + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119], + "top_logprobs": [ + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119] + }, + { + "token": "<|end|>", + "logprob": -10.953937, + "bytes": null + } + ] + }, + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110], + "top_logprobs": [ + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110] + }, + { + "token": " may", + "logprob": -4.161023, + "bytes": [32, 109, 97, 121] + } + ] + }, + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [ + 32, + 73 + ], + "top_logprobs": [ + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [32, 73] + }, + { + "token": " assist", + "logprob": -13.596657, + "bytes": [32, 97, 115, 115, 105, 115, 116] + } + ] + }, + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116], + "top_logprobs": [ + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116] + }, + { + "token": " help", + "logprob": -3.1089056, + "bytes": [32, 104, 101, 108, 112] + } + ] + }, + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117], + "top_logprobs": [ + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117] + }, + { + "token": " today", + "logprob": -12.807695, + "bytes": [32, 116, 111, 100, 97, 121] + } + ] + }, + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121], + "top_logprobs": [ + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121] + }, + { + "token": "?", + "logprob": -5.5247097, + "bytes": [63] + } + ] + }, + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63], + "top_logprobs": [ + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63] + }, + { + "token": "?\n", + "logprob": -7.184561, + "bytes": [63, 10] + } + ] + } + ] + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 9, + "total_tokens": 18, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "system_fingerprint": null + } + /completions: + post: + operationId: createCompletion + tags: + - Completions + summary: Creates a completion for the provided prompt and parameters. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionResponse" + x-oaiMeta: + name: Create completion + group: completions + returns: > + Returns a [completion](/docs/api-reference/completions/object) object, + or a sequence of completion objects if the request is streamed. 
+ legacy: true + examples: + - title: No streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_completion_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0 + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.completions.create( + model="VAR_completion_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0 + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.completions.create({ + model: "VAR_completion_model_id", + prompt: "Say this is a test.", + max_tokens: 7, + temperature: 0, + }); + + console.log(completion); + } + main(); + response: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "VAR_completion_model_id", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_completion_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0, + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + for chunk in client.completions.create( + model="VAR_completion_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0, + stream=True + ): + print(chunk.choices[0].text) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.completions.create({ + model: "VAR_completion_model_id", + prompt: "Say this is a test.", + stream: true, + }); + + for await (const chunk of stream) { + console.log(chunk.choices[0].text) + } + } + main(); + response: | + { + "id": "cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe", + "object": "text_completion", + "created": 1690759702, + "choices": [ + { + "text": "This", + "index": 0, + "logprobs": null, + "finish_reason": null + } + ], + "model": "gpt-3.5-turbo-instruct" + "system_fingerprint": "fp_44709d6fcb", + } + /embeddings: + post: + operationId: createEmbedding + tags: + - Embeddings + summary: Creates an embedding vector representing the input text. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingResponse" + x-oaiMeta: + name: Create embeddings + group: embeddings + returns: A list of [embedding](/docs/api-reference/embeddings/object) objects. 
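+      # An illustrative sketch (official `openai` Python client) showing how to
+      # read the returned vectors and compute a cosine similarity; the model
+      # name matches the example below:
+      #
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   resp = client.embeddings.create(
+      #       model="text-embedding-ada-002",
+      #       input=["The food was delicious", "The service was excellent"],
+      #   )
+      #   a = resp.data[0].embedding
+      #   b = resp.data[1].embedding
+      #   dot = sum(x * y for x, y in zip(a, b))
+      #   norm = (sum(x * x for x in a) ** 0.5) * (sum(y * y for y in b) ** 0.5)
+      #   print(len(a), dot / norm)  # vector length and cosine similarity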
+ examples: + request: + curl: | + curl https://api.openai.com/v1/embeddings \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "input": "The food was delicious and the waiter...", + "model": "text-embedding-ada-002", + "encoding_format": "float" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.embeddings.create( + model="text-embedding-ada-002", + input="The food was delicious and the waiter...", + encoding_format="float" + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const embedding = await openai.embeddings.create({ + model: "text-embedding-ada-002", + input: "The quick brown fox jumped over the lazy dog", + encoding_format: "float", + }); + + console.log(embedding); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + ], + "model": "text-embedding-ada-002", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + /files: + get: + operationId: listFiles + tags: + - Files + summary: Returns a list of files. + parameters: + - in: query + name: purpose + required: false + schema: + type: string + description: Only return files with the given purpose. + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 10,000, and the default is 10,000. + required: false + schema: + type: integer + default: 10000 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFilesResponse" + x-oaiMeta: + name: List files + group: files + returns: A list of [File](/docs/api-reference/files/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.files.list(); + + for await (const file of list) { + console.log(file); + } + } + + main(); + response: | + { + "data": [ + { + "id": "file-abc123", + "object": "file", + "bytes": 175, + "created_at": 1613677385, + "filename": "salesOverview.pdf", + "purpose": "assistants", + }, + { + "id": "file-abc123", + "object": "file", + "bytes": 140, + "created_at": 1613779121, + "filename": "puppy.jsonl", + "purpose": "fine-tune", + } + ], + "object": "list" + } + post: + operationId: createFile + tags: + - Files + summary: > + Upload a file that can be used across various endpoints. Individual + files can be up to 512 MB, and the size of all files uploaded by one + organization can be up to 100 GB. 
+ + + The Assistants API supports files up to 2 million tokens and of specific + file types. See the [Assistants Tools guide](/docs/assistants/tools) for + details. + + + The Fine-tuning API only supports `.jsonl` files. The input also has + certain required formats for fine-tuning + [chat](/docs/api-reference/fine-tuning/chat-input) or + [completions](/docs/api-reference/fine-tuning/completions-input) models. + + + The Batch API only supports `.jsonl` files up to 200 MB in size. The + input also has a specific required + [format](/docs/api-reference/batch/request-input). + + + Please [contact us](https://help.openai.com/) if you need to increase + these storage limits. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateFileRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/OpenAIFile" + x-oaiMeta: + name: Upload file + group: files + returns: The uploaded [File](/docs/api-reference/files/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F purpose="fine-tune" \ + -F file="@mydata.jsonl" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.create( + file=open("mydata.jsonl", "rb"), + purpose="fine-tune" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.create({ + file: fs.createReadStream("mydata.jsonl"), + purpose: "fine-tune", + }); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "mydata.jsonl", + "purpose": "fine-tune", + } + /files/{file_id}: + delete: + operationId: deleteFile + tags: + - Files + summary: Delete a file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteFileResponse" + x-oaiMeta: + name: Delete file + group: files + returns: Deletion status. + examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123 \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.delete("file-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.del("file-abc123"); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "deleted": true + } + get: + operationId: retrieveFile + tags: + - Files + summary: Returns information about a specific file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/OpenAIFile" + x-oaiMeta: + name: Retrieve file + group: files + returns: The [File](/docs/api-reference/files/object) object matching the + specified ID. 
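+      # Note (illustrative only): `files.retrieve` returns metadata, not file
+      # contents; downloads go through the `/files/{file_id}/content` endpoint.
+      # A sketch with the official `openai` Python client, assuming a text file
+      # such as `.jsonl` and that the binary response exposes `.text`:
+      #
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   meta = client.files.retrieve("file-abc123")
+      #   content = client.files.content("file-abc123")
+      #   with open(meta.filename, "w") as f:
+      #       f.write(content.text)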
+ examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.retrieve("file-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.retrieve("file-abc123"); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "mydata.jsonl", + "purpose": "fine-tune", + } + /files/{file_id}/content: + get: + operationId: downloadFile + tags: + - Files + summary: Returns the contents of the specified file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + type: string + x-oaiMeta: + name: Retrieve file content + group: files + returns: The file content. + examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123/content \ + -H "Authorization: Bearer $OPENAI_API_KEY" > file.jsonl + python: | + from openai import OpenAI + client = OpenAI() + + content = client.files.content("file-abc123") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.content("file-abc123"); + + console.log(file); + } + + main(); + /fine_tuning/jobs: + post: + operationId: createFineTuningJob + tags: + - Fine-tuning + summary: > + Creates a fine-tuning job which begins the process of creating a new + model from a given dataset. + + + Response includes details of the enqueued job including job status and + the name of the fine-tuned models once complete. + + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateFineTuningJobRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Create fine-tuning job + group: fine-tuning + returns: A [fine-tuning.job](/docs/api-reference/fine-tuning/object) object. 
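+      # A polling sketch (illustrative only), assuming the official `openai`
+      # Python client; it waits for the job to finish and prints the resulting
+      # fine-tuned model name:
+      #
+      #   import time
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   job = client.fine_tuning.jobs.create(
+      #       training_file="file-abc123",
+      #       model="gpt-4o-mini",
+      #   )
+      #   while job.status not in ("succeeded", "failed", "cancelled"):
+      #       time.sleep(60)
+      #       job = client.fine_tuning.jobs.retrieve(job.id)
+      #   print(job.status, job.fine_tuned_model)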
+ examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "model": "gpt-4o-mini" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-4o-mini" + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + } + - title: Epochs + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "model": "gpt-4o-mini", + "hyperparameters": { + "n_epochs": 2 + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-4o-mini", + hyperparameters={ + "n_epochs":2 + } + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + model: "gpt-4o-mini", + hyperparameters: { n_epochs: 2 } + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": {"n_epochs": 2}, + } + - title: Validation file + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-4o-mini" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + validation_file="file-def456", + model="gpt-4o-mini" + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + validation_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + } + - title: W&B Integration + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-4o-mini", + "integrations": [ + { + "type": "wandb", + "wandb": { + "project": 
"my-wandb-project", + "name": "ft-run-display-name" + "tags": [ + "first-experiment", "v2" + ] + } + } + ] + }' + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + "integrations": [ + { + "type": "wandb", + "wandb": { + "project": "my-wandb-project", + "entity": None, + "run_id": "ftjob-abc123" + } + } + ] + } + get: + operationId: listPaginatedFineTuningJobs + tags: + - Fine-tuning + summary: | + List your organization's fine-tuning jobs + parameters: + - name: after + in: query + description: Identifier for the last job from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of fine-tuning jobs to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListPaginatedFineTuningJobsResponse" + x-oaiMeta: + name: List fine-tuning jobs + group: fine-tuning + returns: A list of paginated [fine-tuning + job](/docs/api-reference/fine-tuning/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.fineTuning.jobs.list(); + + for await (const fineTune of list) { + console.log(fineTune); + } + } + + main(); + response: > + { + "object": "list", + "data": [ + { + "object": "fine_tuning.job.event", + "id": "ft-event-TjX0lMfOniCZX64t9PUQT5hn", + "created_at": 1689813489, + "level": "warn", + "message": "Fine tuning process stopping due to job cancellation", + "data": null, + "type": "message" + }, + { ... }, + { ... } + ], "has_more": true + } + /fine_tuning/jobs/{fine_tuning_job_id}: + get: + operationId: retrieveFineTuningJob + tags: + - Fine-tuning + summary: | + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Retrieve fine-tuning job + group: fine-tuning + returns: The [fine-tuning](/docs/api-reference/fine-tuning/object) object with + the given ID. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F + \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.retrieve("ftjob-abc123") + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); + + console.log(fineTune); + } + + + main(); + response: > + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "davinci-002", + "created_at": 1692661014, + "finished_at": 1692661190, + "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", + "organization_id": "org-123", + "result_files": [ + "file-abc123" + ], + "status": "succeeded", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": { + "n_epochs": 4, + "batch_size": 1, + "learning_rate_multiplier": 1.0 + }, + "trained_tokens": 5768, + "integrations": [], + "seed": 0, + "estimated_finish": 0 + } + /fine_tuning/jobs/{fine_tuning_job_id}/cancel: + post: + operationId: cancelFineTuningJob + tags: + - Fine-tuning + summary: | + Immediately cancel a fine-tune job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to cancel. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Cancel fine-tuning + group: fine-tuning + returns: The cancelled [fine-tuning](/docs/api-reference/fine-tuning/object) + object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.cancel("ftjob-abc123") + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-4o-mini-2024-07-18", + "created_at": 1721764800, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "hyperparameters": { + "n_epochs": "auto" + }, + "status": "cancelled", + "validation_file": "file-abc123", + "training_file": "file-abc123" + } + /fine_tuning/jobs/{fine_tuning_job_id}/checkpoints: + get: + operationId: listFineTuningJobCheckpoints + tags: + - Fine-tuning + summary: | + List checkpoints for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get checkpoints for. + - name: after + in: query + description: Identifier for the last checkpoint ID from the previous pagination + request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of checkpoints to retrieve. 
+ required: false + schema: + type: integer + default: 10 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobCheckpointsResponse" + x-oaiMeta: + name: List fine-tuning checkpoints + group: fine-tuning + returns: A list of fine-tuning [checkpoint + objects](/docs/api-reference/fine-tuning/checkpoint-object) for a + fine-tuning job. + examples: + request: + curl: > + curl + https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/checkpoints + \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: > + { + "object": "list" + "data": [ + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", + "created_at": 1721764867, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:96olL566:ckpt-step-2000", + "metrics": { + "full_valid_loss": 0.134, + "full_valid_mean_token_accuracy": 0.874 + }, + "fine_tuning_job_id": "ftjob-abc123", + "step_number": 2000, + }, + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", + "created_at": 1721764800, + "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "metrics": { + "full_valid_loss": 0.167, + "full_valid_mean_token_accuracy": 0.781 + }, + "fine_tuning_job_id": "ftjob-abc123", + "step_number": 1000, + }, + ], + "first_id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", + "last_id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", + "has_more": true + } + /fine_tuning/jobs/{fine_tuning_job_id}/events: + get: + operationId: listFineTuningEvents + tags: + - Fine-tuning + summary: | + Get status updates for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get events for. + - name: after + in: query + description: Identifier for the last event from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of events to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobEventsResponse" + x-oaiMeta: + name: List fine-tuning events + group: fine-tuning + returns: A list of fine-tuning event objects. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.list_events( + fine_tuning_job_id="ftjob-abc123", + limit=2 + ) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const list = await openai.fineTuning.list_events(id="ftjob-abc123", limit=2); + + for await (const fineTune of list) { + console.log(fineTune); + } + } + + + main(); + response: > + { + "object": "list", + "data": [ + { + "object": "fine_tuning.job.event", + "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm", + "created_at": 1721764800, + "level": "info", + "message": "Fine tuning job successfully completed", + "data": null, + "type": "message" + }, + { + "object": "fine_tuning.job.event", + "id": "ft-event-tyiGuB72evQncpH87xe505Sv", + "created_at": 1721764800, + "level": "info", + "message": "New fine-tuned model created: ft:gpt-4o-mini:openai::7p4lURel", + "data": null, + "type": "message" + } + ], + "has_more": true + } + /images/edits: + post: + operationId: createImageEdit + tags: + - Images + summary: Creates an edited or extended image given an original image and a prompt. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateImageEditRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image edit + group: images + returns: Returns a list of [image](/docs/api-reference/images/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/images/edits \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F image="@otter.png" \ + -F mask="@mask.png" \ + -F prompt="A cute baby sea otter wearing a beret" \ + -F n=2 \ + -F size="1024x1024" + python: | + from openai import OpenAI + client = OpenAI() + + client.images.edit( + image=open("otter.png", "rb"), + mask=open("mask.png", "rb"), + prompt="A cute baby sea otter wearing a beret", + n=2, + size="1024x1024" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.edit({ + image: fs.createReadStream("otter.png"), + mask: fs.createReadStream("mask.png"), + prompt: "A cute baby sea otter wearing a beret", + }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + /images/generations: + post: + operationId: createImage + tags: + - Images + summary: Creates an image given a prompt. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateImageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image + group: images + returns: Returns a list of [image](/docs/api-reference/images/object) objects. 
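+      # An illustrative sketch (official `openai` Python client plus the
+      # standard library) that generates an image and saves it from the
+      # returned URL:
+      #
+      #   import urllib.request
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   result = client.images.generate(
+      #       model="dall-e-3",
+      #       prompt="A cute baby sea otter",
+      #       n=1,
+      #       size="1024x1024",
+      #   )
+      #   urllib.request.urlretrieve(result.data[0].url, "otter.png")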
+ examples: + request: + curl: | + curl https://api.openai.com/v1/images/generations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "dall-e-3", + "prompt": "A cute baby sea otter", + "n": 1, + "size": "1024x1024" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); + + console.log(image.data); + } + + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + /images/variations: + post: + operationId: createImageVariation + tags: + - Images + summary: Creates a variation of a given image. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateImageVariationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image variation + group: images + returns: Returns a list of [image](/docs/api-reference/images/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/images/variations \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F image="@otter.png" \ + -F n=2 \ + -F size="1024x1024" + python: | + from openai import OpenAI + client = OpenAI() + + response = client.images.create_variation( + image=open("image_edit_original.png", "rb"), + n=2, + size="1024x1024" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.createVariation({ + image: fs.createReadStream("otter.png"), + }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + /models: + get: + operationId: listModels + tags: + - Models + summary: Lists the currently available models, and provides basic information + about each one such as the owner and availability. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListModelsResponse" + x-oaiMeta: + name: List models + group: models + returns: A list of [model](/docs/api-reference/models/object) objects. 
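+      # An illustrative sketch (official `openai` Python client): iterate the
+      # model list and filter on the `owned_by` field shown in the example below:
+      #
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   for model in client.models.list():
+      #       if model.owned_by == "openai":
+      #           print(model.id, model.created)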
+ examples: + request: + curl: | + curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.models.list(); + + for await (const model of list) { + console.log(model); + } + } + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "model-id-0", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner" + }, + { + "id": "model-id-1", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner", + }, + { + "id": "model-id-2", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + }, + ], + "object": "list" + } + /models/{model}: + get: + operationId: retrieveModel + tags: + - Models + summary: Retrieves a model instance, providing basic information about the model + such as the owner and permissioning. + parameters: + - in: path + name: model + required: true + schema: + type: string + example: gpt-4o-mini + description: The ID of the model to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Model" + x-oaiMeta: + name: Retrieve model + group: models + returns: The [model](/docs/api-reference/models/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/models/VAR_chat_model_id \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.retrieve("VAR_chat_model_id") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const model = await openai.models.retrieve("VAR_chat_model_id"); + + console.log(model); + } + + main(); + response: | + { + "id": "VAR_chat_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + } + delete: + operationId: deleteModel + tags: + - Models + summary: Delete a fine-tuned model. You must have the Owner role in your + organization to delete a model. + parameters: + - in: path + name: model + required: true + schema: + type: string + example: ft:gpt-4o-mini:acemeco:suffix:abc123 + description: The model to delete + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteModelResponse" + x-oaiMeta: + name: Delete a fine-tuned model + group: models + returns: Deletion status. + examples: + request: + curl: > + curl + https://api.openai.com/v1/models/ft:gpt-4o-mini:acemeco:suffix:abc123 + \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123") + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const model = await openai.models.del("ft:gpt-4o-mini:acemeco:suffix:abc123"); + + console.log(model); + } + + main(); + response: | + { + "id": "ft:gpt-4o-mini:acemeco:suffix:abc123", + "object": "model", + "deleted": true + } + /moderations: + post: + operationId: createModeration + tags: + - Moderations + summary: | + Classifies if text and/or image inputs are potentially harmful. Learn + more in the [moderation guide](/docs/guides/moderation). 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationResponse" + x-oaiMeta: + name: Create moderation + group: moderations + returns: A [moderation](/docs/api-reference/moderations/object) object. + examples: + - title: Single string + request: + curl: | + curl https://api.openai.com/v1/moderations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "input": "I want to kill them." + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + moderation = client.moderations.create(input="I want to kill + them.") + + print(moderation) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const moderation = await openai.moderations.create({ input: "I want to kill them." }); + + console.log(moderation); + } + + main(); + response: | + { + "id": "modr-AB8CjOTu2jiq12hp1AQPfeqFWaORR", + "model": "text-moderation-007", + "results": [ + { + "flagged": true, + "categories": { + "sexual": false, + "hate": false, + "harassment": true, + "self-harm": false, + "sexual/minors": false, + "hate/threatening": false, + "violence/graphic": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "harassment/threatening": true, + "violence": true + }, + "category_scores": { + "sexual": 0.000011726012417057063, + "hate": 0.22706663608551025, + "harassment": 0.5215635299682617, + "self-harm": 2.227119921371923e-6, + "sexual/minors": 7.107352217872176e-8, + "hate/threatening": 0.023547329008579254, + "violence/graphic": 0.00003391829886822961, + "self-harm/intent": 1.646940972932498e-6, + "self-harm/instructions": 1.1198755256458526e-9, + "harassment/threatening": 0.5694745779037476, + "violence": 0.9971134662628174 + } + } + ] + } + - title: Image and text + request: + curl: > + curl https://api.openai.com/v1/moderations \ + -X POST \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "omni-moderation-latest", + "input": [ + { "type": "text", "text": "...text to classify goes here..." }, + { + "type": "image_url", + "image_url": { + "url": "https://example.com/image.png" + } + } + ] + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + response = client.moderations.create( + model="omni-moderation-latest", + input=[ + {"type": "text", "text": "...text to classify goes here..."}, + { + "type": "image_url", + "image_url": { + "url": "https://example.com/image.png", + # can also use base64 encoded image URLs + # "url": "data:image/jpeg;base64,abcdefg..." + } + }, + ], + ) + + + print(response) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + const moderation = await openai.moderations.create({ + model: "omni-moderation-latest", + input: [ + { type: "text", text: "...text to classify goes here..." }, + { + type: "image_url", + image_url: { + url: "https://example.com/image.png" + // can also use base64 encoded image URLs + // url: "data:image/jpeg;base64,abcdefg..." 
+ } + } + ], + }); + + + console.log(moderation); + response: | + { + "id": "modr-0d9740456c391e43c445bf0f010940c7", + "model": "omni-moderation-latest", + "results": [ + { + "flagged": true, + "categories": { + "harassment": true, + "harassment/threatening": true, + "sexual": false, + "hate": false, + "hate/threatening": false, + "illicit": false, + "illicit/violent": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "self-harm": false, + "sexual/minors": false, + "violence": true, + "violence/graphic": true + }, + "category_scores": { + "harassment": 0.8189693396524255, + "harassment/threatening": 0.804985420696006, + "sexual": 1.573112165348997e-6, + "hate": 0.007562942636942845, + "hate/threatening": 0.004208854591835476, + "illicit": 0.030535955153511665, + "illicit/violent": 0.008925306722380033, + "self-harm/intent": 0.00023023930975076432, + "self-harm/instructions": 0.0002293869201073356, + "self-harm": 0.012598046106750154, + "sexual/minors": 2.212566909570261e-8, + "violence": 0.9999992735124786, + "violence/graphic": 0.843064871157054 + }, + "category_applied_input_types": { + "harassment": [ + "text" + ], + "harassment/threatening": [ + "text" + ], + "sexual": [ + "text", + "image" + ], + "hate": [ + "text" + ], + "hate/threatening": [ + "text" + ], + "illicit": [ + "text" + ], + "illicit/violent": [ + "text" + ], + "self-harm/intent": [ + "text", + "image" + ], + "self-harm/instructions": [ + "text", + "image" + ], + "self-harm": [ + "text", + "image" + ], + "sexual/minors": [ + "text" + ], + "violence": [ + "text", + "image" + ], + "violence/graphic": [ + "text", + "image" + ] + } + } + ] + } + /organization/audit_logs: + get: + summary: List user actions and configuration changes within this organization. + operationId: list-audit-logs + tags: + - Audit Logs + parameters: + - name: effective_at + in: query + description: Return only events whose `effective_at` (Unix seconds) is in this + range. + required: false + schema: type: object properties: - type: - enum: - - static - type: string - description: Always `static`. - static: - $ref: '#/components/schemas/StaticChunkingStrategy' - additionalProperties: false - ChunkingStrategyRequestParam: - type: object + gt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater + than this value. + gte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is greater + than or equal to this value. + lt: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than + this value. + lte: + type: integer + description: Return only events whose `effective_at` (Unix seconds) is less than + or equal to this value. + - name: project_ids[] + in: query + description: Return only events for these projects. + required: false + schema: + type: array + items: + type: string + - name: event_types[] + in: query + description: Return only events with a `type` in one of these values. For + example, `project.created`. For all options, see the documentation + for the [audit log object](/docs/api-reference/audit-logs/object). + required: false + schema: + type: array + items: + $ref: "#/components/schemas/AuditLogEventType" + - name: actor_ids[] + in: query + description: Return only events performed by these actors. Can be a user ID, a + service account ID, or an api key tracking ID. 
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: actor_emails[]
+          in: query
+          description: Return only events performed by users with these emails.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: resource_ids[]
+          in: query
+          description: Return only events performed on these targets. For example,
+            an updated project ID.
+          required: false
+          schema:
+            type: array
+            items:
+              type: string
+        - name: limit
+          in: query
+          description: >
+            A limit on the number of objects to be returned. Limit can range
+            between 1 and 100, and the default is 20.
+          required: false
+          schema:
+            type: integer
+            default: 20
+        - name: after
+          in: query
+          description: >
+            A cursor for use in pagination. `after` is an object ID that defines
+            your place in the list. For instance, if you make a list request and
+            receive 100 objects, ending with obj_foo, your subsequent call can
+            include after=obj_foo in order to fetch the next page of the list.
+          schema:
+            type: string
+        - name: before
+          in: query
+          description: >
+            A cursor for use in pagination. `before` is an object ID that
+            defines your place in the list. For instance, if you make a list
+            request and receive 100 objects, starting with obj_foo, your
+            subsequent call can include before=obj_foo in order to fetch the
+            previous page of the list.
+          schema:
+            type: string
+      responses:
+        "200":
+          description: Audit logs listed successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ListAuditLogsResponse"
+      x-oaiMeta:
+        name: List audit logs
+        group: audit-logs
+        returns: A list of paginated [Audit Log](/docs/api-reference/audit-logs/object)
+          objects.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/organization/audit_logs \
+              -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+              -H "Content-Type: application/json"
+          response: >
+            {
+                "object": "list",
+                "data": [
+                    {
+                        "id": "audit_log-xxx_yyyymmdd",
+                        "type": "project.archived",
+                        "effective_at": 1722461446,
+                        "actor": {
+                            "type": "api_key",
+                            "api_key": {
+                                "type": "user",
+                                "user": {
+                                    "id": "user-xxx",
+                                    "email": "user@example.com"
+                                }
+                            }
+                        },
+                        "project.archived": {
+                            "id": "proj_abc"
+                        }
+                    },
+                    {
+                        "id": "audit_log-yyy__20240101",
+                        "type": "api_key.updated",
+                        "effective_at": 1720804190,
+                        "actor": {
+                            "type": "session",
+                            "session": {
+                                "user": {
+                                    "id": "user-xxx",
+                                    "email": "user@example.com"
+                                },
+                                "ip_address": "127.0.0.1",
+                                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
+                            }
+                        },
+                        "api_key.updated": {
+                            "id": "key_xxxx",
+                            "data": {
+                                "scopes": ["resource_2.operation_2"]
+                            }
+                        }
+                    }
+                ],
+                "first_id": "audit_log-xxx_yyyymmdd",
+                "last_id": "audit_log-yyy__20240101",
+                "has_more": true
+            }
+  /organization/costs:
+    get:
+      summary: Get costs details for the organization.
+      operationId: usage-costs
+      tags:
+        - Usage
+      parameters:
+        - name: start_time
+          in: query
+          description: Start time (Unix seconds) of the query time range, inclusive.
+          required: true
+          schema:
+            type: integer
+        - name: end_time
+          in: query
+          description: End time (Unix seconds) of the query time range, exclusive.
+          required: false
+          schema:
+            type: integer
+        - name: bucket_width
+          in: query
+          description: Width of each time bucket in response. Currently only `1d` is
+            supported, default to `1d`.
+          required: false
+          schema:
+            type: string
+            enum:
+              - 1d
+            default: 1d
+        - name: project_ids
+          in: query
+          description: Return only costs for these projects.
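+          # A sketch of pulling daily cost buckets, again assuming `requests` and
+          # OPENAI_ADMIN_KEY as in the curl example below; `proj_abc` is a placeholder
+          # project ID and array parameters are sent as repeated query keys.
+          #
+          #   import os
+          #   import requests
+          #
+          #   resp = requests.get(
+          #       "https://api.openai.com/v1/organization/costs",
+          #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+          #       params={
+          #           "start_time": 1730419200,               # required, inclusive, Unix seconds
+          #           "bucket_width": "1d",                   # only supported width
+          #           "project_ids": ["proj_abc"],
+          #           "group_by": ["project_id", "line_item"],
+          #           "limit": 7,
+          #       },
+          #   )
+          #   resp.raise_for_status()
+          #   for bucket in resp.json()["data"]:
+          #       for result in bucket["results"]:
+          #           print(bucket["start_time"], result["project_id"],
+          #                 result["amount"]["value"], result["amount"]["currency"])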
+ required: false + schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the costs by the specified fields. Support fields include + `project_id`, `line_item` and any combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - project_id + - line_item + - name: limit + in: query + description: > + A limit on the number of buckets to be returned. Limit can range + between 1 and 180, and the default is 7. + required: false + schema: + type: integer + default: 7 + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` + field from the previous response. + schema: + type: string + responses: + "200": + description: Costs data retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UsageResponse" + x-oaiMeta: + name: Costs + group: usage-costs + returns: A list of paginated, time bucketed + [Costs](/docs/api-reference/usage/costs_object) objects. + examples: + request: + curl: > + curl + "https://api.openai.com/v1/organization/costs?start_time=1730419200&limit=1" + \ + + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + + -H "Content-Type: application/json" + response: | + { + "object": "page", + "data": [ + { + "object": "bucket", + "start_time": 1730419200, + "end_time": 1730505600, + "results": [ + { + "object": "orgainzation.costs.result", + "amount": { + "value": 0.06, + "currency": "usd" + }, + "line_item": null, + "project_id": null + } + ] + } + ], + "has_more": false, + "next_page": null + } + /organization/invites: + get: + summary: Returns a list of invites in the organization. + operationId: list-invites + tags: + - Invites + parameters: + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + responses: + "200": + description: Invites listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/InviteListResponse" + x-oaiMeta: + name: List invites + group: administration + returns: A list of [Invite](/docs/api-reference/invite/object) objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/invites?after=invite-abc&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + ], + "first_id": "invite-abc", + "last_id": "invite-abc", + "has_more": false + } + post: + summary: Create an invite for a user to the organization. The invite must be + accepted by the user before they have access to the organization. + operationId: inviteUser + tags: + - Invites + requestBody: + description: The invite request payload. 
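+        # A sketch of sending this invite payload, assuming `requests` and
+        # OPENAI_ADMIN_KEY; the email address is a placeholder and the role value
+        # mirrors the example below.
+        #
+        #   import os
+        #   import requests
+        #
+        #   resp = requests.post(
+        #       "https://api.openai.com/v1/organization/invites",
+        #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+        #       json={"email": "user@example.com", "role": "owner"},
+        #   )
+        #   resp.raise_for_status()
+        #   invite = resp.json()
+        #   print(invite["id"], invite["expires_at"])  # accepted_at stays null until accepted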
+ required: true + content: + application/json: + schema: + $ref: "#/components/schemas/InviteRequest" + responses: + "200": + description: User invited successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Invite" + x-oaiMeta: + name: Create invite + group: administration + returns: The created [Invite](/docs/api-reference/invite/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/invites \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "email": "user@example.com", + "role": "owner" + }' + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": null + } + /organization/invites/{invite_id}: + get: + summary: Retrieves an invite. + operationId: retrieve-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to retrieve. + responses: + "200": + description: Invite retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Invite" + x-oaiMeta: + name: Retrieve invite + group: administration + returns: The [Invite](/docs/api-reference/invite/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + delete: + summary: Delete an invite. If the invite has already been accepted, it cannot be + deleted. + operationId: delete-invite + tags: + - Invites + parameters: + - in: path + name: invite_id + required: true + schema: + type: string + description: The ID of the invite to delete. + responses: + "200": + description: Invite deleted successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/InviteDeleteResponse" + x-oaiMeta: + name: Delete invite + group: administration + returns: Confirmation that the invite has been deleted + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/invites/invite-abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.invite.deleted", + "id": "invite-abc", + "deleted": true + } + /organization/projects: + get: + summary: Returns a list of projects. + operationId: list-projects + tags: + - Projects + parameters: + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. 
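+          # A sketch of walking every page with this `after` cursor, assuming
+          # `requests` and OPENAI_ADMIN_KEY; it relies on the `has_more` and
+          # `last_id` fields shown in the list response below.
+          #
+          #   import os
+          #   import requests
+          #
+          #   url = "https://api.openai.com/v1/organization/projects"
+          #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
+          #   params = {"limit": 100, "include_archived": "true"}
+          #   while True:
+          #       page = requests.get(url, headers=headers, params=params).json()
+          #       for project in page["data"]:
+          #           print(project["id"], project["name"], project["status"])
+          #       if not page["has_more"]:
+          #           break
+          #       params["after"] = page["last_id"]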
+ required: false + schema: + type: string + - name: include_archived + in: query + schema: + type: boolean + default: false + description: If `true` returns all projects including those that have been + `archived`. Archived projects are not included by default. + responses: + "200": + description: Projects listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectListResponse" + x-oaiMeta: + name: List projects + group: administration + returns: A list of [Project](/docs/api-reference/projects/object) objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects?after=proj_abc&limit=20&include_archived=false + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + ], + "first_id": "proj-abc", + "last_id": "proj-xyz", + "has_more": false + } + post: + summary: Create a new project in the organization. Projects can be created and + archived, but cannot be deleted. + operationId: create-project + tags: + - Projects + requestBody: + description: The project create request payload. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectCreateRequest" + responses: + "200": + description: Project created successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Project" + x-oaiMeta: + name: Create project + group: administration + returns: The created [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/organization/projects \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project ABC" + }' + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project ABC", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + /organization/projects/{project_id}: + get: + summary: Retrieves a project. + operationId: retrieve-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Project" + x-oaiMeta: + name: Retrieve project + group: administration + description: Retrieve a project. + returns: The [Project](/docs/api-reference/projects/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + post: + summary: Modifies a project in the organization. + operationId: modify-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + requestBody: + description: The project update request payload. 
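+        # A sketch of renaming a project with this payload, assuming `requests`
+        # and OPENAI_ADMIN_KEY; `proj_abc` is a placeholder. Renaming the default
+        # project fails with the 400 response documented below.
+        #
+        #   import os
+        #   import requests
+        #
+        #   resp = requests.post(
+        #       "https://api.openai.com/v1/organization/projects/proj_abc",
+        #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+        #       json={"name": "Project DEF"},
+        #   )
+        #   resp.raise_for_status()
+        #   print(resp.json()["name"])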
+ required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUpdateRequest" + responses: + "200": + description: Project updated successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Project" + "400": + description: Error response when updating the default project. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Modify project + group: administration + returns: The updated [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Project DEF" + }' + /organization/projects/{project_id}/api_keys: + get: + summary: Returns a list of API keys in the project. + operationId: list-project-api-keys + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + responses: + "200": + description: Project API keys listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectApiKeyListResponse" + x-oaiMeta: + name: List project API keys + group: administration + returns: A list of [ProjectApiKey](/docs/api-reference/project-api-keys/object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/api_keys?after=key_abc&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + ], + "first_id": "key_abc", + "last_id": "key_xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + /organization/projects/{project_id}/api_keys/{key_id}: + get: + summary: Retrieves an API key in the project. + operationId: retrieve-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key retrieved successfully. 
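+          # A sketch of auditing a project's keys through the list endpoint above,
+          # assuming `requests` and OPENAI_ADMIN_KEY; `proj_abc` is a placeholder.
+          # Only redacted key values come back from this API.
+          #
+          #   import os
+          #   import requests
+          #
+          #   resp = requests.get(
+          #       "https://api.openai.com/v1/organization/projects/proj_abc/api_keys",
+          #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+          #       params={"limit": 20},
+          #   )
+          #   resp.raise_for_status()
+          #   for key in resp.json()["data"]:
+          #       print(key["id"], key["redacted_value"], key["owner"]["type"])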
+ content: + application/json: + schema: + $ref: "#/components/schemas/ProjectApiKey" + x-oaiMeta: + name: Retrieve project API key + group: administration + returns: The [ProjectApiKey](/docs/api-reference/project-api-keys/object) object + matching the specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + } + } + delete: + summary: Deletes an API key from the project. + operationId: delete-project-api-key + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: key_id + in: path + description: The ID of the API key. + required: true + schema: + type: string + responses: + "200": + description: Project API key deleted successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectApiKeyDeleteResponse" + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Delete project API key + group: administration + returns: Confirmation of the key's deletion or an error if the key belonged to a + service account + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/projects/proj_abc/api_keys/key_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.api_key.deleted", + "id": "key_abc", + "deleted": true + } + error_response: + content: > + { + "code": 400, + "message": "API keys cannot be deleted for service accounts, please delete the service account" + } + /organization/projects/{project_id}/archive: + post: + summary: Archives a project in the organization. Archived projects cannot be + used or updated. + operationId: archive-project + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + responses: + "200": + description: Project archived successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/Project" + x-oaiMeta: + name: Archive project + group: administration + returns: The archived [Project](/docs/api-reference/projects/object) object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc/archive \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project DEF", + "created_at": 1711471533, + "archived_at": 1711471533, + "status": "archived" + } + /organization/projects/{project_id}/rate_limits: + get: + summary: Returns the rate limits per model for a project. + operationId: list-project-rate-limits + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. 
+ required: true + schema: + type: string + - name: limit + in: query + description: | + A limit on the number of objects to be returned. The default is 100. + required: false + schema: + type: integer + default: 100 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, beginning with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + required: false + schema: + type: string + responses: + "200": + description: Project rate limits listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectRateLimitListResponse" + x-oaiMeta: + name: List project rate limits + group: administration + returns: A list of + [ProjectRateLimit](/docs/api-reference/project-rate-limits/object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/rate_limits?after=rl_xxx&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: | + { + "object": "list", + "data": [ + { + "object": "project.rate_limit", + "id": "rl-ada", + "model": "ada", + "max_requests_per_1_minute": 600, + "max_tokens_per_1_minute": 150000, + "max_images_per_1_minute": 10 + } + ], + "first_id": "rl-ada", + "last_id": "rl-ada", + "has_more": false + } + error_response: | + { + "code": 404, + "message": "The project {project_id} was not found" + } + /organization/projects/{project_id}/rate_limits/{rate_limit_id}: + post: + summary: Updates a project rate limit. + operationId: update-project-rate-limits + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: rate_limit_id + in: path + description: The ID of the rate limit. + required: true + schema: + type: string + requestBody: + description: The project rate limit update request payload. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectRateLimitUpdateRequest" + responses: + "200": + description: Project rate limit updated successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectRateLimit" + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Modify project rate limit + group: administration + returns: The updated + [ProjectRateLimit](/docs/api-reference/project-rate-limits/object) + object. 
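+        # A sketch of tightening a project's request cap with this endpoint,
+        # assuming `requests` and OPENAI_ADMIN_KEY; the project and rate limit IDs
+        # are placeholders taken from the examples below.
+        #
+        #   import os
+        #   import requests
+        #
+        #   resp = requests.post(
+        #       "https://api.openai.com/v1/organization/projects/proj_abc/rate_limits/rl-ada",
+        #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+        #       json={"max_requests_per_1_minute": 500},
+        #   )
+        #   resp.raise_for_status()
+        #   print(resp.json()["max_requests_per_1_minute"])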
+ examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc/rate_limits/rl_xxx + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "max_requests_per_1_minute": 500 + }' + response: | + { + "object": "project.rate_limit", + "id": "rl-ada", + "model": "ada", + "max_requests_per_1_minute": 600, + "max_tokens_per_1_minute": 150000, + "max_images_per_1_minute": 10 + } + error_response: | + { + "code": 404, + "message": "The project {project_id} was not found" + } + /organization/projects/{project_id}/service_accounts: + get: + summary: Returns a list of service accounts in the project. + operationId: list-project-service-accounts + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + responses: + "200": + description: Project service accounts listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectServiceAccountListResponse" + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: List project service accounts + group: administration + returns: A list of + [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/service_accounts?after=custom_id&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + ], + "first_id": "svc_acct_abc", + "last_id": "svc_acct_xyz", + "has_more": false + } + post: + summary: Creates a new service account in the project. This also returns an + unredacted API key for the service account. + operationId: create-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + requestBody: + description: The project service account create request payload. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectServiceAccountCreateRequest" + responses: + "200": + description: Project service account created successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectServiceAccountCreateResponse" + "400": + description: Error response when project is archived. 
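+          # A sketch of provisioning a service account and capturing the unredacted
+          # API key that is only returned on creation; assumes `requests` and
+          # OPENAI_ADMIN_KEY, with a placeholder project ID.
+          #
+          #   import os
+          #   import requests
+          #
+          #   resp = requests.post(
+          #       "https://api.openai.com/v1/organization/projects/proj_abc/service_accounts",
+          #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+          #       json={"name": "Production App"},
+          #   )
+          #   resp.raise_for_status()
+          #   account = resp.json()
+          #   secret = account["api_key"]["value"]  # list responses only show a redacted value
+          #   print(account["id"], account["role"])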
+ content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Create project service account + group: administration + returns: The created + [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) + object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc/service_accounts + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Production App" + }' + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Production App", + "role": "member", + "created_at": 1711471533, + "api_key": { + "object": "organization.project.service_account.api_key", + "value": "sk-abcdefghijklmnop123", + "name": "Secret Key", + "created_at": 1711471533, + "id": "key_abc" + } + } + /organization/projects/{project_id}/service_accounts/{service_account_id}: + get: + summary: Retrieves a service account in the project. + operationId: retrieve-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectServiceAccount" + x-oaiMeta: + name: Retrieve project service account + group: administration + returns: The + [ProjectServiceAccount](/docs/api-reference/project-service-accounts/object) + object matching the specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + delete: + summary: Deletes a service account from the project. + operationId: delete-project-service-account + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: service_account_id + in: path + description: The ID of the service account. + required: true + schema: + type: string + responses: + "200": + description: Project service account deleted successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectServiceAccountDeleteResponse" + x-oaiMeta: + name: Delete project service account + group: administration + returns: Confirmation of service account being deleted, or an error in case of + an archived project, which has no service accounts + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/projects/proj_abc/service_accounts/svc_acct_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.service_account.deleted", + "id": "svc_acct_abc", + "deleted": true + } + /organization/projects/{project_id}/users: + get: + summary: Returns a list of users in the project. + operationId: list-project-users + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. 
+ required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + required: false + schema: + type: string + responses: + "200": + description: Project users listed successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUserListResponse" + "400": + description: Error response when project is archived. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: List project users + group: administration + returns: A list of [ProjectUser](/docs/api-reference/project-users/object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/users?after=user_abc&limit=20 + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "list", + "data": [ + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ], + "first_id": "user-abc", + "last_id": "user-xyz", + "has_more": false + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + post: + summary: Adds a user to the project. Users must already be members of the + organization to be added to a project. + operationId: create-project-user + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + tags: + - Projects + requestBody: + description: The project user create request payload. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUserCreateRequest" + responses: + "200": + description: User added to project successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUser" + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Create project user + group: administration + returns: The created [ProjectUser](/docs/api-reference/project-users/object) + object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc/users \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "user_id": "user_abc", + "role": "member" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + error_response: + content: | + { + "code": 400, + "message": "Project {name} is archived" + } + /organization/projects/{project_id}/users/{user_id}: + get: + summary: Retrieves a user in the project. + operationId: retrieve-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. 
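+          # A sketch of adding an existing organization member to a project via the
+          # create endpoint above, assuming `requests` and OPENAI_ADMIN_KEY; both IDs
+          # are placeholders.
+          #
+          #   import os
+          #   import requests
+          #
+          #   resp = requests.post(
+          #       "https://api.openai.com/v1/organization/projects/proj_abc/users",
+          #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+          #       json={"user_id": "user_abc", "role": "member"},
+          #   )
+          #   if resp.status_code == 400:
+          #       print(resp.json())  # e.g. the archived-project error shown above
+          #   else:
+          #       resp.raise_for_status()
+          #       print(resp.json()["role"])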
+ required: true + schema: + type: string + responses: + "200": + description: Project user retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUser" + x-oaiMeta: + name: Retrieve project user + group: administration + returns: The [ProjectUser](/docs/api-reference/project-users/object) object + matching the specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + post: + summary: Modifies a user's role in the project. + operationId: modify-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + requestBody: + description: The project user update request payload. + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUserUpdateRequest" + responses: + "200": + description: Project user's role updated successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUser" + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Modify project user + group: administration + returns: The updated [ProjectUser](/docs/api-reference/project-users/object) + object. + examples: + request: + curl: > + curl -X POST + https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "role": "owner" + }' + response: + content: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + delete: + summary: Deletes a user from the project. + operationId: delete-project-user + tags: + - Projects + parameters: + - name: project_id + in: path + description: The ID of the project. + required: true + schema: + type: string + - name: user_id + in: path + description: The ID of the user. + required: true + schema: + type: string + responses: + "200": + description: Project user deleted successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/ProjectUserDeleteResponse" + "400": + description: Error response for various conditions. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + x-oaiMeta: + name: Delete project user + group: administration + returns: Confirmation that project has been deleted or an error in case of an + archived project, which has no users + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/projects/proj_abc/users/user_abc + \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.project.user.deleted", + "id": "user_abc", + "deleted": true + } + /organization/usage/audio_speeches: + get: + summary: Get audio speeches usage details for the organization. 
+ operationId: usage-audio-speeches + tags: + - Usage + parameters: + - name: start_time + in: query + description: Start time (Unix seconds) of the query time range, inclusive. + required: true + schema: + type: integer + - name: end_time + in: query + description: End time (Unix seconds) of the query time range, exclusive. + required: false + schema: + type: integer + - name: bucket_width + in: query + description: Width of each time bucket in response. Currently `1m`, `1h` and + `1d` are supported, default to `1d`. + required: false + schema: + type: string + enum: + - 1m + - 1h + - 1d + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + required: false + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + required: false + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + required: false + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + required: false + schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the usage data by the specified fields. Support fields + include `project_id`, `user_id`, `api_key_id`, `model` or any + combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - project_id + - user_id + - api_key_id + - model + - name: limit + in: query + description: | + Specifies the number of buckets to return. + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + required: false + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` + field from the previous response. + schema: + type: string + responses: + "200": + description: Usage data retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UsageResponse" + x-oaiMeta: + name: Audio speeches + group: usage-audio-speeches + returns: A list of paginated, time bucketed [Audio speeches + usage](/docs/api-reference/usage/audio_speeches_object) objects. + examples: + request: + curl: > + curl + "https://api.openai.com/v1/organization/usage/audio_speeches?start_time=1730419200&limit=1" + \ + + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + + -H "Content-Type: application/json" + response: > + { + "object": "page", + "data": [ + { + "object": "bucket", + "start_time": 1730419200, + "end_time": 1730505600, + "results": [ + { + "object": "orgainzation.usage.audio_speeches.result", + "characters": 45, + "num_model_requests": 1, + "project_id": null, + "user_id": null, + "api_key_id": null, + "model": null + } + ] + } + ], + "has_more": false, + "next_page": null + } + /organization/usage/audio_transcriptions: + get: + summary: Get audio transcriptions usage details for the organization. + operationId: usage-audio-transcriptions + tags: + - Usage + parameters: + - name: start_time + in: query + description: Start time (Unix seconds) of the query time range, inclusive. + required: true + schema: + type: integer + - name: end_time + in: query + description: End time (Unix seconds) of the query time range, exclusive. + required: false + schema: + type: integer + - name: bucket_width + in: query + description: Width of each time bucket in response. 
Currently `1m`, `1h` and + `1d` are supported, default to `1d`. + required: false + schema: + type: string + enum: + - 1m + - 1h + - 1d + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + required: false + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + required: false + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + required: false + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + required: false + schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the usage data by the specified fields. Support fields + include `project_id`, `user_id`, `api_key_id`, `model` or any + combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - project_id + - user_id + - api_key_id + - model + - name: limit + in: query + description: | + Specifies the number of buckets to return. + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + required: false + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` + field from the previous response. + schema: + type: string + responses: + "200": + description: Usage data retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UsageResponse" + x-oaiMeta: + name: Audio transcriptions + group: usage-audio-transcriptions + returns: A list of paginated, time bucketed [Audio transcriptions + usage](/docs/api-reference/usage/audio_transcriptions_object) objects. + examples: + request: + curl: > + curl + "https://api.openai.com/v1/organization/usage/audio_transcriptions?start_time=1730419200&limit=1" + \ + + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + + -H "Content-Type: application/json" + response: > + { + "object": "page", + "data": [ + { + "object": "bucket", + "start_time": 1730419200, + "end_time": 1730505600, + "results": [ + { + "object": "orgainzation.usage.audio_transcriptions.result", + "seconds": 20, + "num_model_requests": 1, + "project_id": null, + "user_id": null, + "api_key_id": null, + "model": null + } + ] + } + ], + "has_more": false, + "next_page": null + } + /organization/usage/code_interpreter_sessions: + get: + summary: Get code interpreter sessions usage details for the organization. + operationId: usage-code-interpreter-sessions + tags: + - Usage + parameters: + - name: start_time + in: query + description: Start time (Unix seconds) of the query time range, inclusive. + required: true + schema: + type: integer + - name: end_time + in: query + description: End time (Unix seconds) of the query time range, exclusive. + required: false + schema: + type: integer + - name: bucket_width + in: query + description: Width of each time bucket in response. Currently `1m`, `1h` and + `1d` are supported, default to `1d`. + required: false + schema: + type: string + enum: + - 1m + - 1h + - 1d + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + required: false + schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the usage data by the specified fields. Support fields + include `project_id`. 
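+          # A sketch of splitting these session counts per project with `group_by`,
+          # assuming `requests` and OPENAI_ADMIN_KEY; the start time is a placeholder.
+          #
+          #   import os
+          #   import requests
+          #
+          #   resp = requests.get(
+          #       "https://api.openai.com/v1/organization/usage/code_interpreter_sessions",
+          #       headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
+          #       params={"start_time": 1730419200, "bucket_width": "1d", "group_by": ["project_id"]},
+          #   )
+          #   resp.raise_for_status()
+          #   for bucket in resp.json()["data"]:
+          #       for result in bucket["results"]:
+          #           print(result["project_id"], result["sessions"])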
+ required: false + schema: + type: array + items: + type: string + enum: + - project_id + - name: limit + in: query + description: | + Specifies the number of buckets to return. + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + required: false + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` + field from the previous response. + schema: + type: string + responses: + "200": + description: Usage data retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UsageResponse" + x-oaiMeta: + name: Code interpreter sessions + group: usage-code-interpreter-sessions + returns: A list of paginated, time bucketed [Code interpreter sessions + usage](/docs/api-reference/usage/code_interpreter_sessions_object) + objects. + examples: + request: + curl: > + curl + "https://api.openai.com/v1/organization/usage/code_interpreter_sessions?start_time=1730419200&limit=1" + \ + + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + + -H "Content-Type: application/json" + response: > + { + "object": "page", + "data": [ + { + "object": "bucket", + "start_time": 1730419200, + "end_time": 1730505600, + "results": [ + { + "object": "orgainzation.usage.code_interpreter_sessions.result", + "sessions": 1, + "project_id": null + } + ] + } + ], + "has_more": false, + "next_page": null + } + /organization/usage/completions: + get: + summary: Get completions usage details for the organization. + operationId: usage-completions + tags: + - Usage + parameters: + - name: start_time + in: query + description: Start time (Unix seconds) of the query time range, inclusive. + required: true + schema: + type: integer + - name: end_time + in: query + description: End time (Unix seconds) of the query time range, exclusive. + required: false + schema: + type: integer + - name: bucket_width + in: query + description: Width of each time bucket in response. Currently `1m`, `1h` and + `1d` are supported, default to `1d`. + required: false + schema: + type: string + enum: + - 1m + - 1h + - 1d + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + required: false + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + required: false + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + required: false + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + required: false + schema: + type: array + items: + type: string + - name: batch + in: query + description: > + If `true`, return batch jobs only. If `false`, return non-batch jobs + only. By default, return both. + required: false + schema: + type: boolean + - name: group_by + in: query + description: Group the usage data by the specified fields. Support fields + include `project_id`, `user_id`, `api_key_id`, `model`, `batch` or + any combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - project_id + - user_id + - api_key_id + - model + - batch + - name: limit + in: query + description: | + Specifies the number of buckets to return. 
+ - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + required: false + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` + field from the previous response. + schema: + type: string + responses: + "200": + description: Usage data retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UsageResponse" + x-oaiMeta: + name: Completions + group: usage-completions + returns: A list of paginated, time bucketed [Completions + usage](/docs/api-reference/usage/completions_object) objects. + examples: + request: + curl: > + curl + "https://api.openai.com/v1/organization/usage/completions?start_time=1730419200&limit=1" + \ + + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + + -H "Content-Type: application/json" + response: > + { + "object": "page", + "data": [ + { + "object": "bucket", + "start_time": 1730419200, + "end_time": 1730505600, + "results": [ + { + "object": "orgainzation.usage.completions.result", + "input_tokens": 1000, + "output_tokens": 500, + "input_cached_tokens": 800, + "num_model_requests": 5, + "project_id": null, + "user_id": null, + "api_key_id": null, + "model": null, + "batch": null + } + ] + } + ], + "has_more": true, + "next_page": "AAAAAGdGxdEiJdKOAAAAAGcqsYA=" + } + /organization/usage/embeddings: + get: + summary: Get embeddings usage details for the organization. + operationId: usage-embeddings + tags: + - Usage + parameters: + - name: start_time + in: query + description: Start time (Unix seconds) of the query time range, inclusive. + required: true + schema: + type: integer + - name: end_time + in: query + description: End time (Unix seconds) of the query time range, exclusive. + required: false + schema: + type: integer + - name: bucket_width + in: query + description: Width of each time bucket in response. Currently `1m`, `1h` and + `1d` are supported, default to `1d`. + required: false + schema: + type: string + enum: + - 1m + - 1h + - 1d + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + required: false + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + required: false + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + required: false + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + required: false + schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the usage data by the specified fields. Support fields + include `project_id`, `user_id`, `api_key_id`, `model` or any + combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - project_id + - user_id + - api_key_id + - model + - name: limit + in: query + description: | + Specifies the number of buckets to return. + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + required: false + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` + field from the previous response. + schema: + type: string + responses: + "200": + description: Usage data retrieved successfully. 
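+          # A sketch of following the `page` cursor that these usage endpoints hand
+          # back as `next_page` (see the completions example above), assuming
+          # `requests` and OPENAI_ADMIN_KEY.
+          #
+          #   import os
+          #   import requests
+          #
+          #   url = "https://api.openai.com/v1/organization/usage/embeddings"
+          #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}
+          #   params = {"start_time": 1730419200, "bucket_width": "1d", "limit": 31}
+          #   total_input_tokens = 0
+          #   while True:
+          #       body = requests.get(url, headers=headers, params=params).json()
+          #       for bucket in body["data"]:
+          #           total_input_tokens += sum(r["input_tokens"] for r in bucket["results"])
+          #       if not body.get("next_page"):
+          #           break
+          #       params["page"] = body["next_page"]
+          #   print(total_input_tokens)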
+ content: + application/json: + schema: + $ref: "#/components/schemas/UsageResponse" + x-oaiMeta: + name: Embeddings + group: usage-embeddings + returns: A list of paginated, time bucketed [Embeddings + usage](/docs/api-reference/usage/embeddings_object) objects. + examples: + request: + curl: > + curl + "https://api.openai.com/v1/organization/usage/embeddings?start_time=1730419200&limit=1" + \ + + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + + -H "Content-Type: application/json" + response: > + { + "object": "page", + "data": [ + { + "object": "bucket", + "start_time": 1730419200, + "end_time": 1730505600, + "results": [ + { + "object": "orgainzation.usage.embeddings.result", + "input_tokens": 16, + "num_model_requests": 2, + "project_id": null, + "user_id": null, + "api_key_id": null, + "model": null + } + ] + } + ], + "has_more": false, + "next_page": null + } + /organization/usage/images: + get: + summary: Get images usage details for the organization. + operationId: usage-images + tags: + - Usage + parameters: + - name: start_time + in: query + description: Start time (Unix seconds) of the query time range, inclusive. + required: true + schema: + type: integer + - name: end_time + in: query + description: End time (Unix seconds) of the query time range, exclusive. + required: false + schema: + type: integer + - name: bucket_width + in: query + description: Width of each time bucket in response. Currently `1m`, `1h` and + `1d` are supported, default to `1d`. + required: false + schema: + type: string + enum: + - 1m + - 1h + - 1d + default: 1d + - name: sources + in: query + description: Return only usages for these sources. Possible values are + `image.generation`, `image.edit`, `image.variation` or any + combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - image.generation + - image.edit + - image.variation + - name: sizes + in: query + description: Return only usages for these image sizes. Possible values are + `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792` or any + combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 1792x1792 + - 1024x1792 + - name: project_ids + in: query + description: Return only usage for these projects. + required: false + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + required: false + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + required: false + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + required: false + schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the usage data by the specified fields. Support fields + include `project_id`, `user_id`, `api_key_id`, `model`, `size`, + `source` or any combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - project_id + - user_id + - api_key_id + - model + - size + - source + - name: limit + in: query + description: | + Specifies the number of buckets to return. + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + required: false + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. 
Corresponding to the `next_page` + field from the previous response. + schema: + type: string + responses: + "200": + description: Usage data retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UsageResponse" + x-oaiMeta: + name: Images + group: usage-images + returns: A list of paginated, time bucketed [Images + usage](/docs/api-reference/usage/images_object) objects. + examples: + request: + curl: > + curl + "https://api.openai.com/v1/organization/usage/images?start_time=1730419200&limit=1" + \ + + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + + -H "Content-Type: application/json" + response: | + { + "object": "page", + "data": [ + { + "object": "bucket", + "start_time": 1730419200, + "end_time": 1730505600, + "results": [ + { + "object": "orgainzation.usage.images.result", + "images": 2, + "num_model_requests": 2, + "size": null, + "source": null, + "project_id": null, + "user_id": null, + "api_key_id": null, + "model": null + } + ] + } + ], + "has_more": false, + "next_page": null + } + /organization/usage/moderations: + get: + summary: Get moderations usage details for the organization. + operationId: usage-moderations + tags: + - Usage + parameters: + - name: start_time + in: query + description: Start time (Unix seconds) of the query time range, inclusive. + required: true + schema: + type: integer + - name: end_time + in: query + description: End time (Unix seconds) of the query time range, exclusive. + required: false + schema: + type: integer + - name: bucket_width + in: query + description: Width of each time bucket in response. Currently `1m`, `1h` and + `1d` are supported, default to `1d`. + required: false + schema: + type: string + enum: + - 1m + - 1h + - 1d + default: 1d + - name: project_ids + in: query + description: Return only usage for these projects. + required: false + schema: + type: array + items: + type: string + - name: user_ids + in: query + description: Return only usage for these users. + required: false + schema: + type: array + items: + type: string + - name: api_key_ids + in: query + description: Return only usage for these API keys. + required: false + schema: + type: array + items: + type: string + - name: models + in: query + description: Return only usage for these models. + required: false + schema: + type: array + items: + type: string + - name: group_by + in: query + description: Group the usage data by the specified fields. Support fields + include `project_id`, `user_id`, `api_key_id`, `model` or any + combination of them. + required: false + schema: + type: array + items: + type: string + enum: + - project_id + - user_id + - api_key_id + - model + - name: limit + in: query + description: | + Specifies the number of buckets to return. + - `bucket_width=1d`: default: 7, max: 31 + - `bucket_width=1h`: default: 24, max: 168 + - `bucket_width=1m`: default: 60, max: 1440 + required: false + schema: + type: integer + - name: page + in: query + description: A cursor for use in pagination. Corresponding to the `next_page` + field from the previous response. + schema: + type: string + responses: + "200": + description: Usage data retrieved successfully. + content: + application/json: + schema: + $ref: "#/components/schemas/UsageResponse" + x-oaiMeta: + name: Moderations + group: usage-moderations + returns: A list of paginated, time bucketed [Moderations + usage](/docs/api-reference/usage/moderations_object) objects. 
+ examples:
+ request:
+ curl: >
+ curl
+ "https://api.openai.com/v1/organization/usage/moderations?start_time=1730419200&limit=1"
+ \
+
+ -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+ -H "Content-Type: application/json"
+ response: >
+ {
+ "object": "page",
+ "data": [
+ {
+ "object": "bucket",
+ "start_time": 1730419200,
+ "end_time": 1730505600,
+ "results": [
+ {
+ "object": "organization.usage.moderations.result",
+ "input_tokens": 16,
+ "num_model_requests": 2,
+ "project_id": null,
+ "user_id": null,
+ "api_key_id": null,
+ "model": null
+ }
+ ]
+ }
+ ],
+ "has_more": false,
+ "next_page": null
+ }
+ /organization/usage/vector_stores:
+ get:
+ summary: Get vector stores usage details for the organization.
+ operationId: usage-vector-stores
+ tags:
+ - Usage
+ parameters:
+ - name: start_time
+ in: query
+ description: Start time (Unix seconds) of the query time range, inclusive.
+ required: true
+ schema:
+ type: integer
+ - name: end_time
+ in: query
+ description: End time (Unix seconds) of the query time range, exclusive.
+ required: false
+ schema:
+ type: integer
+ - name: bucket_width
+ in: query
+ description: Width of each time bucket in response. Currently `1m`, `1h` and
+ `1d` are supported, defaulting to `1d`.
+ required: false
+ schema:
+ type: string
+ enum:
+ - 1m
+ - 1h
+ - 1d
+ default: 1d
+ - name: project_ids
+ in: query
+ description: Return only usage for these projects.
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ - name: group_by
+ in: query
+ description: Group the usage data by the specified fields. Supported fields
+ include `project_id`.
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - project_id
+ - name: limit
+ in: query
+ description: |
+ Specifies the number of buckets to return.
+ - `bucket_width=1d`: default: 7, max: 31
+ - `bucket_width=1h`: default: 24, max: 168
+ - `bucket_width=1m`: default: 60, max: 1440
+ required: false
+ schema:
+ type: integer
+ - name: page
+ in: query
+ description: A cursor for use in pagination. This corresponds to the `next_page`
+ field from the previous response.
+ schema:
+ type: string
+ responses:
+ "200":
+ description: Usage data retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/UsageResponse"
+ x-oaiMeta:
+ name: Vector stores
+ group: usage-vector-stores
+ returns: A list of paginated, time bucketed [Vector stores
+ usage](/docs/api-reference/usage/vector_stores_object) objects.
+ examples:
+ request:
+ curl: >
+ curl
+ "https://api.openai.com/v1/organization/usage/vector_stores?start_time=1730419200&limit=1"
+ \
+
+ -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+
+ -H "Content-Type: application/json"
+ response: >
+ {
+ "object": "page",
+ "data": [
+ {
+ "object": "bucket",
+ "start_time": 1730419200,
+ "end_time": 1730505600,
+ "results": [
+ {
+ "object": "organization.usage.vector_stores.result",
+ "usage_bytes": 1024,
+ "project_id": null
+ }
+ ]
+ }
+ ],
+ "has_more": false,
+ "next_page": null
+ }
+ /organization/users:
+ get:
+ summary: Lists all of the users in the organization.
+ operationId: list-users
+ tags:
+ - Users
+ parameters:
+ - name: limit
+ in: query
+ description: >
+ A limit on the number of objects to be returned. Limit can range
+ between 1 and 100, and the default is 20.
+ required: false
+ schema:
+ type: integer
+ default: 20
+ - name: after
+ in: query
+ description: >
+ A cursor for use in pagination. `after` is an object ID that defines
+ your place in the list.
For instance, if you make a list request and
+ receive 100 objects, ending with obj_foo, your subsequent call can
+ include after=obj_foo in order to fetch the next page of the list.
+ required: false
+ schema:
+ type: string
+ responses:
+ "200":
+ description: Users listed successfully.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/UserListResponse"
+ x-oaiMeta:
+ name: List users
+ group: administration
+ returns: A list of [User](/docs/api-reference/users/object) objects.
+ examples:
+ request:
+ curl: >
+ curl
+ "https://api.openai.com/v1/organization/users?after=user_abc&limit=20"
+ \
+ -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+ -H "Content-Type: application/json"
+ response:
+ content: |
+ {
+ "object": "list",
+ "data": [
+ {
+ "object": "organization.user",
+ "id": "user_abc",
+ "name": "First Last",
+ "email": "user@example.com",
+ "role": "owner",
+ "added_at": 1711471533
+ }
+ ],
+ "first_id": "user_abc",
+ "last_id": "user_xyz",
+ "has_more": false
+ }
+ /organization/users/{user_id}:
+ get:
+ summary: Retrieves a user by their identifier.
+ operationId: retrieve-user
+ tags:
+ - Users
+ parameters:
+ - name: user_id
+ in: path
+ description: The ID of the user.
+ required: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: User retrieved successfully.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/User"
+ x-oaiMeta:
+ name: Retrieve user
+ group: administration
+ returns: The [User](/docs/api-reference/users/object) object matching the
+ specified ID.
+ examples:
+ request:
+ curl: |
+ curl https://api.openai.com/v1/organization/users/user_abc \
+ -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+ -H "Content-Type: application/json"
+ response:
+ content: |
+ {
+ "object": "organization.user",
+ "id": "user_abc",
+ "name": "First Last",
+ "email": "user@example.com",
+ "role": "owner",
+ "added_at": 1711471533
+ }
+ post:
+ summary: Modifies a user's role in the organization.
+ operationId: modify-user
+ tags:
+ - Users
+ parameters:
+ - name: user_id
+ in: path
+ description: The ID of the user.
+ required: true
+ schema:
+ type: string
+ requestBody:
+ description: The new user role to assign. This must be one of `owner` or `member`.
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/UserRoleUpdateRequest"
+ responses:
+ "200":
+ description: User role updated successfully.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/User"
+ x-oaiMeta:
+ name: Modify user
+ group: administration
+ returns: The updated [User](/docs/api-reference/users/object) object.
+ examples:
+ request:
+ curl: >
+ curl -X POST https://api.openai.com/v1/organization/users/user_abc
+ \
+ -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "role": "owner"
+ }'
+ response:
+ content: |
+ {
+ "object": "organization.user",
+ "id": "user_abc",
+ "name": "First Last",
+ "email": "user@example.com",
+ "role": "owner",
+ "added_at": 1711471533
+ }
+ delete:
+ summary: Deletes a user from the organization.
+ operationId: delete-user
+ tags:
+ - Users
+ parameters:
+ - name: user_id
+ in: path
+ description: The ID of the user.
+ required: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: User deleted successfully.
+ content: + application/json: + schema: + $ref: "#/components/schemas/UserDeleteResponse" + x-oaiMeta: + name: Delete user + group: administration + returns: Confirmation of the deleted user + examples: + request: + curl: > + curl -X DELETE + https://api.openai.com/v1/organization/users/user_abc \ + -H "Authorization: Bearer $OPENAI_ADMIN_KEY" \ + -H "Content-Type: application/json" + response: + content: | + { + "object": "organization.user.deleted", + "id": "user_abc", + "deleted": true + } + /threads: + post: + operationId: createThread + tags: + - Assistants + summary: Create a thread. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CreateThreadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Create thread + group: threads + beta: true + returns: A [thread](/docs/api-reference/threads) object. + examples: + - title: Empty + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '' + python: | + from openai import OpenAI + client = OpenAI() + + empty_thread = client.beta.threads.create() + print(empty_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const emptyThread = await openai.beta.threads.create(); + + console.log(emptyThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699012949, + "metadata": {}, + "tool_resources": {} + } + - title: Messages + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "messages": [{ + "role": "user", + "content": "Hello, what is AI?" + }, { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }] + }' + python: | + from openai import OpenAI + client = OpenAI() + + message_thread = client.beta.threads.create( + messages=[ + { + "role": "user", + "content": "Hello, what is AI?" + }, + { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }, + ] + ) + + print(message_thread) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const messageThread = await openai.beta.threads.create({ + messages: [ + { + role: "user", + content: "Hello, what is AI?" + }, + { + role: "user", + content: "How does AI work? Explain it in simple terms.", + }, + ], + }); + + console.log(messageThread); + } + + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": {}, + "tool_resources": {} + } + /threads/runs: + post: + operationId: createThreadAndRun + tags: + - Assistants + summary: Create a thread and run it in one request. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateThreadAndRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Create thread and run + group: threads + beta: true + returns: A [run](/docs/api-reference/runs/object) object. 
+ examples: + - title: Default + request: + curl: > + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123", + "thread": { + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + run = client.beta.threads.create_and_run( + assistant_id="asst_abc123", + thread={ + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + ) + + + print(run) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const run = await openai.beta.threads.createAndRun({ + assistant_id: "asst_abc123", + thread: { + messages: [ + { role: "user", content: "Explain deep learning to a 5 year old." }, + ], + }, + }); + + console.log(run); + } + + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076792, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": null, + "expires_at": 1699077392, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "required_action": null, + "last_error": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant.", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "temperature": 1.0, + "top_p": 1.0, + "max_completion_tokens": null, + "max_prompt_tokens": null, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "incomplete_details": null, + "usage": null, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_123", + "thread": { + "messages": [ + {"role": "user", "content": "Hello"} + ] + }, + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.create_and_run( + assistant_id="asst_123", + thread={ + "messages": [ + {"role": "user", "content": "Hello"} + ] + }, + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.beta.threads.createAndRun({ + assistant_id: "asst_123", + thread: { + messages: [ + { role: "user", content: "Hello" }, + ], + }, + stream: true + }); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: > + event: thread.created + + data: + {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} + + + event: thread.run.created + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + + + 
event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + + + event: thread.run.step.created + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.run.step.in_progress + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.message.created + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], + "metadata":{}} + + + event: thread.message.in_progress + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], + "metadata":{}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + + ... 
+ + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + today"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + + event: thread.message.completed + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! + How can I assist you today?","annotations":[]}}], "metadata":{}} + + + event: thread.run.step.completed + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + + event: thread.run.completed + + {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true} + + + event: done + + data: [DONE] + - title: Streaming with Functions + request: + curl: > + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123", + "thread": { + "messages": [ + {"role": "user", "content": "What is the weather like in San Francisco?"} + ] + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "stream": true + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + + stream = client.beta.threads.create_and_run( + thread={ + "messages": [ + {"role": "user", "content": "What is the weather like in San Francisco?"} + ] + }, + assistant_id="asst_abc123", + tools=tools, + stream=True + ) + + + for event in stream: + print(event) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + + async function main() { + const stream = await openai.beta.threads.createAndRun({ + assistant_id: "asst_123", + thread: { + messages: [ + { role: "user", content: "What is the weather like in San Francisco?" }, + ], + }, + tools: tools, + stream: true + }); + + for await (const event of stream) { + console.log(event); + } + } + + + main(); + response: > + event: thread.created + + data: + {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} + + + event: thread.run.created + + data: + {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. 
San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.step.created + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + + + event: thread.run.step.in_progress + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} + + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} + + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} + + + ... 
+ + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} + + + event: thread.run.step.delta + + data: + {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} + + + event: thread.run.requires_action + + data: + {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San + Francisco, + CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: done + + data: [DONE] + /threads/{thread_id}: + get: + operationId: getThread + tags: + - Assistants + summary: Retrieves a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Retrieve thread + group: threads + beta: true + returns: The [thread](/docs/api-reference/threads/object) object matching the + specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_thread = client.beta.threads.retrieve("thread_abc123") + print(my_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myThread = await openai.beta.threads.retrieve( + "thread_abc123" + ); + + console.log(myThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": {}, + "tool_resources": { + "code_interpreter": { + "file_ids": [] + } + } + } + post: + operationId: modifyThread + tags: + - Assistants + summary: Modifies a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to modify. Only the `metadata` can be modified. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyThreadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Modify thread + group: threads + beta: true + returns: The modified [thread](/docs/api-reference/threads/object) object + matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "metadata": { + "modified": "true", + "user": "abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + my_updated_thread = client.beta.threads.update( + "thread_abc123", + metadata={ + "modified": "true", + "user": "abc123" + } + ) + print(my_updated_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const updatedThread = await openai.beta.threads.update( + "thread_abc123", + { + metadata: { modified: "true", user: "abc123" }, + } + ); + + console.log(updatedThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": { + "modified": "true", + "user": "abc123" + }, + "tool_resources": {} + } + delete: + operationId: deleteThread + tags: + - Assistants + summary: Delete a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteThreadResponse" + x-oaiMeta: + name: Delete thread + group: threads + beta: true + returns: Deletion status + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X DELETE + python: | + from openai import OpenAI + client = OpenAI() + + response = client.beta.threads.delete("thread_abc123") + print(response) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.beta.threads.del("thread_abc123"); + + console.log(response); + } + main(); + response: | + { + "id": "thread_abc123", + "object": "thread.deleted", + "deleted": true + } + /threads/{thread_id}/messages: + get: + operationId: listMessages + tags: + - Assistants + summary: Returns a list of messages for a given thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) the messages + belong to. + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. 
For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + - name: run_id + in: query + description: | + Filter messages by the run ID that generated them. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListMessagesResponse" + x-oaiMeta: + name: List messages + group: threads + beta: true + returns: A list of [message](/docs/api-reference/messages) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: > + from openai import OpenAI + + client = OpenAI() + + + thread_messages = + client.beta.threads.messages.list("thread_abc123") + + print(thread_messages.data) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const threadMessages = await openai.beta.threads.messages.list( + "thread_abc123" + ); + + console.log(threadMessages.data); + } + + main(); + response: > + { + "object": "list", + "data": [ + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699016383, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + }, + { + "id": "msg_abc456", + "object": "thread.message", + "created_at": 1699016383, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "Hello, what is AI?", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + } + ], + "first_id": "msg_abc123", + "last_id": "msg_abc456", + "has_more": false + } + post: + operationId: createMessage + tags: + - Assistants + summary: Create a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to create a + message for. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Create message + group: threads + beta: true + returns: A [message](/docs/api-reference/messages/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "role": "user", + "content": "How does AI work? Explain it in simple terms." 
+ }' + python: | + from openai import OpenAI + client = OpenAI() + + thread_message = client.beta.threads.messages.create( + "thread_abc123", + role="user", + content="How does AI work? Explain it in simple terms.", + ) + print(thread_message) + node.js: >- + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const threadMessages = await openai.beta.threads.messages.create( + "thread_abc123", + { role: "user", content: "How does AI work? Explain it in simple terms." } + ); + + console.log(threadMessages); + } + + + main(); + response: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1713226573, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + } + /threads/{thread_id}/messages/{message_id}: + get: + operationId: getMessage + tags: + - Assistants + summary: Retrieve a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to which this + message belongs. + - in: path + name: message_id + required: true + schema: + type: string + description: The ID of the message to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Retrieve message + group: threads + beta: true + returns: The [message](/docs/api-reference/messages/object) object matching the + specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 + \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + message = client.beta.threads.messages.retrieve( + message_id="msg_abc123", + thread_id="thread_abc123", + ) + print(message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const message = await openai.beta.threads.messages.retrieve( + "thread_abc123", + "msg_abc123" + ); + + console.log(message); + } + + main(); + response: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699017614, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + } + post: + operationId: modifyMessage + tags: + - Assistants + summary: Modifies a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which this message belongs. + - in: path + name: message_id + required: true + schema: + type: string + description: The ID of the message to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Modify message + group: threads + beta: true + returns: The modified [message](/docs/api-reference/messages/object) object. 
+ examples:
+ request:
+ curl: >
+ curl
+ https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123
+ \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "OpenAI-Beta: assistants=v2" \
+ -d '{
+ "metadata": {
+ "modified": "true",
+ "user": "abc123"
+ }
+ }'
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ message = client.beta.threads.messages.update(
+ message_id="msg_abc123",
+ thread_id="thread_abc123",
+ metadata={
+ "modified": "true",
+ "user": "abc123",
+ },
+ )
+ print(message)
+ node.js: |-
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const message = await openai.beta.threads.messages.update(
+ "thread_abc123",
+ "msg_abc123",
+ {
+ metadata: {
+ modified: "true",
+ user: "abc123",
+ },
+ }
+ );
+
+ console.log(message);
+ }
+
+ main();
+ response: |
+ {
+ "id": "msg_abc123",
+ "object": "thread.message",
+ "created_at": 1699017614,
+ "assistant_id": null,
+ "thread_id": "thread_abc123",
+ "run_id": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": {
+ "value": "How does AI work? Explain it in simple terms.",
+ "annotations": []
+ }
+ }
+ ],
+ "attachments": [],
+ "metadata": {
+ "modified": "true",
+ "user": "abc123"
+ }
+ }
+ delete:
+ operationId: deleteMessage
+ tags:
+ - Assistants
+ summary: Deletes a message.
+ parameters:
+ - in: path
+ name: thread_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the thread to which this message belongs.
+ - in: path
+ name: message_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the message to delete.
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/DeleteMessageResponse"
+ x-oaiMeta:
+ name: Delete message
+ group: threads
+ beta: true
+ returns: Deletion status
+ examples:
+ request:
+ curl: >
+ curl -X DELETE
+ https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123
+ \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "OpenAI-Beta: assistants=v2"
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ deleted_message = client.beta.threads.messages.delete(
+ message_id="msg_abc123",
+ thread_id="thread_abc123",
+ )
+ print(deleted_message)
+ node.js: |-
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const deletedMessage = await openai.beta.threads.messages.del(
+ "thread_abc123",
+ "msg_abc123"
+ );
+
+ console.log(deletedMessage);
+ }
+
+ main();
+ response: |
+ {
+ "id": "msg_abc123",
+ "object": "thread.message.deleted",
+ "deleted": true
+ }
+ /threads/{thread_id}/runs:
+ get:
+ operationId: listRuns
+ tags:
+ - Assistants
+ summary: Returns a list of runs belonging to a thread.
+ parameters:
+ - name: thread_id
+ in: path
+ required: true
+ schema:
+ type: string
+ description: The ID of the thread the run belongs to.
+ - name: limit
+ in: query
+ description: >
+ A limit on the number of objects to be returned. Limit can range
+ between 1 and 100, and the default is 20.
+ required: false
+ schema:
+ type: integer
+ default: 20
+ - name: order
+ in: query
+ description: >
+ Sort order by the `created_at` timestamp of the objects. `asc` for
+ ascending order and `desc` for descending order.
+ schema:
+ type: string
+ default: desc
+ enum:
+ - asc
+ - desc
+ - name: after
+ in: query
+ description: >
+ A cursor for use in pagination. `after` is an object ID that defines
+ your place in the list.
For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunsResponse" + x-oaiMeta: + name: List runs + group: threads + beta: true + returns: A list of [run](/docs/api-reference/runs/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + runs = client.beta.threads.runs.list( + "thread_abc123" + ) + + print(runs) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const runs = await openai.beta.threads.runs.list( + "thread_abc123" + ); + + console.log(runs); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + } + }, + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + }, + { + "id": "run_abc456", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + } + }, + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + ], + "first_id": "run_abc123", + "last_id": "run_abc456", + "has_more": false + } + post: + operationId: createRun + tags: + - Assistants + summary: Create a run. 
+ parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to run. + - name: include[] + in: query + description: > + A list of additional fields to include in the response. Currently + the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch + the file search result content. + + + See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + schema: + type: array + items: + type: string + enum: + - step_details.tool_calls[*].file_search.results[*].content + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Create run + group: threads + beta: true + returns: A [run](/docs/api-reference/runs/object) object. + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.create( + "thread_abc123", + { assistant_id: "asst_abc123" } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/thread_123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_123", + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.runs.create( + thread_id="thread_123", + assistant_id="asst_123", + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.beta.threads.runs.create( + "thread_123", + { assistant_id: "asst_123", stream: true } + ); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: > + event: thread.run.created + + data: + 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.step.created + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.run.step.in_progress + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.message.created + + data: + {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.in_progress + + data: + {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.delta + + data: + 
{"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + + ... + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + today"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + + event: thread.message.completed + + data: + {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! + How can I assist you today?","annotations":[]}}],"metadata":{}} + + + event: thread.run.step.completed + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + + event: thread.run.completed + + data: + {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: done + + data: [DONE] + - title: Streaming with Functions + request: + curl: > + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123", + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "stream": true + }' + python: > + from openai import OpenAI + + client = OpenAI() + + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + + stream = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123", + tools=tools, + stream=True + ) + + + for event in stream: + print(event) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + + async function main() { + const stream = await openai.beta.threads.runs.create( + "thread_abc123", + { + assistant_id: "asst_abc123", + tools: tools, + stream: true + } + ); + + for await (const event of stream) { + console.log(event); + } + } + + + main(); + response: > + event: thread.run.created + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.step.created + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.run.step.in_progress + + data: + 
{"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + + event: thread.message.created + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.in_progress + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + + ... + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + today"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + + event: thread.message.completed + + data: + {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! + How can I assist you today?","annotations":[]}}],"metadata":{}} + + + event: thread.run.step.completed + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + + event: thread.run.completed + + data: + {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: done + + data: [DONE] + /threads/{thread_id}/runs/{run_id}: + get: + operationId: getRun + tags: + - Assistants + summary: Retrieves a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. 
+ - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Retrieve run + group: threads + beta: true + returns: The [run](/docs/api-reference/runs/object) object matching the + specified ID. + examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.retrieve( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.retrieve( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + post: + operationId: modifyRun + tags: + - Assistants + summary: Modifies a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Modify run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the + specified ID. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "metadata": { + "user_id": "user_abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.update( + thread_id="thread_abc123", + run_id="run_abc123", + metadata={"user_id": "user_abc123"}, + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.update( + "thread_abc123", + "run_abc123", + { + metadata: { + user_id: "user_abc123", + }, + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + } + }, + "metadata": { + "user_id": "user_abc123" + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + /threads/{thread_id}/runs/{run_id}/cancel: + post: + operationId: cancelRun + tags: + - Assistants + summary: Cancels a run that is `in_progress`. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which this run belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to cancel. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Cancel a run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the + specified ID. 
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X POST + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.cancel( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.cancel( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076126, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "cancelling", + "started_at": 1699076126, + "expires_at": 1699076726, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4o", + "instructions": "You summarize books.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": ["vs_123"] + } + }, + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + /threads/{thread_id}/runs/{run_id}/steps: + get: + operationId: listRunSteps + tags: + - Assistants + summary: Returns a list of run steps belonging to a run. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run and run steps belong to. + - name: run_id + in: path + required: true + schema: + type: string + description: The ID of the run the run steps belong to. + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + - name: include[] + in: query + description: > + A list of additional fields to include in the response. Currently + the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch + the file search result content. + + + See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. 
+ schema: + type: array + items: + type: string + enum: + - step_details.tool_calls[*].file_search.results[*].content + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunStepsResponse" + x-oaiMeta: + name: List run steps + group: threads + beta: true + returns: A list of [run step](/docs/api-reference/run-steps/step-object) + objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run_steps = client.beta.threads.runs.steps.list( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run_steps) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.list( + "thread_abc123", + "run_abc123" + ); + console.log(runStep); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + ], + "first_id": "step_abc123", + "last_id": "step_abc456", + "has_more": false + } + /threads/{thread_id}/runs/{run_id}/steps/{step_id}: + get: + operationId: getRunStep + tags: + - Assistants + summary: Retrieves a run step. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which the run and run step belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to which the run step belongs. + - in: path + name: step_id + required: true + schema: + type: string + description: The ID of the run step to retrieve. + - name: include[] + in: query + description: > + A list of additional fields to include in the response. Currently + the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch + the file search result content. + + + See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + schema: + type: array + items: + type: string + enum: + - step_details.tool_calls[*].file_search.results[*].content + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunStepObject" + x-oaiMeta: + name: Retrieve run step + group: threads + beta: true + returns: The [run step](/docs/api-reference/run-steps/step-object) object + matching the specified ID. 
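+        # The `after`/`before` cursor parameters documented on the list run steps
+        # endpoint above can be used to page through long runs. A minimal sketch,
+        # assuming the official Python SDK and placeholder thread/run IDs (not
+        # part of this spec):
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #
+        #   page = client.beta.threads.runs.steps.list(
+        #       thread_id="thread_abc123",
+        #       run_id="run_abc123",
+        #       limit=20,
+        #   )
+        #   steps = list(page.data)
+        #   while page.has_more:
+        #       # Pass the last seen step ID as the `after` cursor to fetch the next page.
+        #       page = client.beta.threads.runs.steps.list(
+        #           thread_id="thread_abc123",
+        #           run_id="run_abc123",
+        #           limit=20,
+        #           after=steps[-1].id,
+        #       )
+        #       steps.extend(page.data)
+        #   print(len(steps), "steps")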
+ examples: + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run_step = client.beta.threads.runs.steps.retrieve( + thread_id="thread_abc123", + run_id="run_abc123", + step_id="step_abc123" + ) + + print(run_step) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.retrieve( + "thread_abc123", + "run_abc123", + "step_abc123" + ); + console.log(runStep); + } + + main(); + response: | + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: + post: + operationId: submitToolOuputsToRun + tags: + - Assistants + summary: > + When a run has the `status: "requires_action"` and + `required_action.type` is `submit_tool_outputs`, this endpoint can be + used to submit the outputs from the tool calls once they're all + completed. All outputs must be submitted in a single request. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to which this + run belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run that requires the tool output submission. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SubmitToolOutputsRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Submit tool outputs to run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the + specified ID. + examples: + - title: Default + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } + ] + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_123", + run_id="run_123", + tool_outputs=[ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." 
+ } + ] + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.submitToolOutputs( + "thread_123", + "run_123", + { + tool_outputs: [ + { + tool_call_id: "call_001", + output: "70 degrees and sunny.", + }, + ], + } + ); + + console.log(run); + } + + main(); + response: > + { + "id": "run_123", + "object": "thread.run", + "created_at": 1699075592, + "assistant_id": "asst_123", + "thread_id": "thread_123", + "status": "queued", + "started_at": 1699075592, + "expires_at": 1699076192, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + - title: Streaming + request: + curl: > + curl + https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } + ], + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_123", + run_id="run_123", + tool_outputs=[ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." 
+ } + ], + stream=True + ) + + for event in stream: + print(event) + node.js: > + import OpenAI from "openai"; + + + const openai = new OpenAI(); + + + async function main() { + const stream = await openai.beta.threads.runs.submitToolOutputs( + "thread_123", + "run_123", + { + tool_outputs: [ + { + tool_call_id: "call_001", + output: "70 degrees and sunny.", + }, + ], + } + ); + + for await (const event of stream) { + console.log(event); + } + } + + + main(); + response: > + event: thread.run.step.completed + + data: + {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San + Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and + sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} + + + event: thread.run.queued + + data: + {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.in_progress + + data: + {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. 
San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: thread.run.step.created + + data: + {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} + + + event: thread.run.step.in_progress + + data: + {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} + + + event: thread.message.created + + data: + {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.in_progress + + data: + {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + current"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + weather"}}]}} + + + ... 
+ + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" + sunny"}}]}} + + + event: thread.message.delta + + data: + {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} + + + event: thread.message.completed + + data: + {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The + current weather in San Francisco, CA is 70 degrees Fahrenheit and + sunny.","annotations":[]}}],"metadata":{}} + + + event: thread.run.step.completed + + data: + {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} + + + event: thread.run.completed + + data: + {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4o","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get + the current weather in a given + location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The + city and state, e.g. San Francisco, + CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto","parallel_tool_calls":true}} + + + event: done + + data: [DONE] + /uploads: + post: + operationId: createUpload + tags: + - Uploads + summary: > + Creates an intermediate [Upload](/docs/api-reference/uploads/object) + object that you can add [Parts](/docs/api-reference/uploads/part-object) + to. Currently, an Upload can accept at most 8 GB in total and expires + after an hour after you create it. + + + Once you complete the Upload, we will create a + [File](/docs/api-reference/files/object) object that contains all the + parts you uploaded. This File is usable in the rest of our platform as a + regular File object. + + + For certain `purpose`s, the correct `mime_type` must be specified. + Please refer to documentation for the supported MIME types for your use + case: + + - [Assistants](/docs/assistants/tools/file-search#supported-files) + + + For guidance on the proper filename extensions for each purpose, please + follow the documentation on [creating a + File](/docs/api-reference/files/create). 
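+      # The create / add parts / complete flow described above, as a minimal
+      # sketch against the documented endpoints. It assumes the third-party
+      # `requests` package and an OPENAI_API_KEY environment variable; the
+      # file name and chunk size are placeholders, not part of this spec.
+      #
+      #   import os
+      #   import requests
+      #
+      #   base = "https://api.openai.com/v1"
+      #   headers = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}
+      #
+      #   # 1. Create the Upload with the total byte count and MIME type.
+      #   upload = requests.post(f"{base}/uploads", headers=headers, json={
+      #       "purpose": "fine-tune",
+      #       "filename": "training_examples.jsonl",
+      #       "bytes": os.path.getsize("training_examples.jsonl"),
+      #       "mime_type": "text/jsonl",
+      #   }).json()
+      #
+      #   # 2. Add Parts of at most 64 MB each.
+      #   part_ids = []
+      #   with open("training_examples.jsonl", "rb") as f:
+      #       while chunk := f.read(64 * 1024 * 1024):
+      #           part = requests.post(
+      #               f"{base}/uploads/{upload['id']}/parts",
+      #               headers=headers,
+      #               files={"data": chunk},
+      #           ).json()
+      #           part_ids.append(part["id"])
+      #
+      #   # 3. Complete the Upload, passing the Part IDs in order.
+      #   completed = requests.post(
+      #       f"{base}/uploads/{upload['id']}/complete",
+      #       headers=headers,
+      #       json={"part_ids": part_ids},
+      #   ).json()
+      #   print(completed["file"]["id"])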
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Create upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status + `pending`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "purpose": "fine-tune", + "filename": "training_examples.jsonl", + "bytes": 2147483648, + "mime_type": "text/jsonl" + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "pending", + "expires_at": 1719127296 + } + /uploads/{upload_id}/cancel: + post: + operationId: cancelUpload + tags: + - Uploads + summary: | + Cancels the Upload. No Parts may be added after an Upload is cancelled. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Cancel upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status + `cancelled`. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/cancel + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "cancelled", + "expires_at": 1719127296 + } + /uploads/{upload_id}/complete: + post: + operationId: completeUpload + tags: + - Uploads + summary: > + Completes the [Upload](/docs/api-reference/uploads/object). + + + Within the returned Upload object, there is a nested + [File](/docs/api-reference/files/object) object that is ready to use in + the rest of the platform. + + + You can specify the order of the Parts by passing in an ordered list of + the Part IDs. + + + The number of bytes uploaded upon completion must match the number of + bytes initially specified when creating the Upload object. No Parts may + be added after an Upload is completed. + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CompleteUploadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Upload" + x-oaiMeta: + name: Complete upload + group: uploads + returns: The [Upload](/docs/api-reference/uploads/object) object with status + `completed` with an additional `file` property containing the created + usable File object. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/complete + -d '{ + "part_ids": ["part_def456", "part_ghi789"] + }' + response: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + } + } + /uploads/{upload_id}/parts: + post: + operationId: addUploadPart + tags: + - Uploads + summary: > + Adds a [Part](/docs/api-reference/uploads/part-object) to an + [Upload](/docs/api-reference/uploads/object) object. A Part represents a + chunk of bytes from the file you are trying to upload. + + + Each Part can be at most 64 MB, and you can add Parts until you hit the + Upload maximum of 8 GB. + + + It is possible to add multiple Parts in parallel. You can decide the + intended order of the Parts when you [complete the + Upload](/docs/api-reference/uploads/complete). + parameters: + - in: path + name: upload_id + required: true + schema: + type: string + example: upload_abc123 + description: | + The ID of the Upload. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/AddUploadPartRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/UploadPart" + x-oaiMeta: + name: Add upload part + group: uploads + returns: The upload [Part](/docs/api-reference/uploads/part-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/uploads/upload_abc123/parts + -F data="aHR0cHM6Ly9hcGkub3BlbmFpLmNvbS92MS91cGxvYWRz..." + response: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719185911, + "upload_id": "upload_abc123" + } + /vector_stores: + get: + operationId: listVectorStores + tags: + - Vector stores + summary: Returns a list of vector stores. + parameters: + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. 
+ schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListVectorStoresResponse" + x-oaiMeta: + name: List vector stores + group: vector_stores + beta: true + returns: A list of [vector store](/docs/api-reference/vector-stores/object) + objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_stores = client.beta.vector_stores.list() + print(vector_stores) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStores = await openai.beta.vectorStores.list(); + console.log(vectorStores); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + }, + { + "id": "vs_abc456", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ v2", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + } + ], + "first_id": "vs_abc123", + "last_id": "vs_abc456", + "has_more": false + } + post: + operationId: createVectorStore + tags: + - Vector stores + summary: Create a vector store. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateVectorStoreRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreObject" + x-oaiMeta: + name: Create vector store + group: vector_stores + beta: true + returns: A [vector store](/docs/api-reference/vector-stores/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + -d '{ + "name": "Support FAQ" + }' + python: | + from openai import OpenAI + client = OpenAI() + + vector_store = client.beta.vector_stores.create( + name="Support FAQ" + ) + print(vector_store) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStore = await openai.beta.vectorStores.create({ + name: "Support FAQ" + }); + console.log(vectorStore); + } + + main(); + response: | + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + } + /vector_stores/{vector_store_id}: + get: + operationId: getVectorStore + tags: + - Vector stores + summary: Retrieves a vector store. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + description: The ID of the vector store to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreObject" + x-oaiMeta: + name: Retrieve vector store + group: vector_stores + beta: true + returns: The [vector store](/docs/api-reference/vector-stores/object) object + matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_store = client.beta.vector_stores.retrieve( + vector_store_id="vs_abc123" + ) + print(vector_store) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStore = await openai.beta.vectorStores.retrieve( + "vs_abc123" + ); + console.log(vectorStore); + } + + main(); + response: | + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776 + } + post: + operationId: modifyVectorStore + tags: + - Vector stores + summary: Modifies a vector store. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + description: The ID of the vector store to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateVectorStoreRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreObject" + x-oaiMeta: + name: Modify vector store + group: vector_stores + beta: true + returns: The modified [vector store](/docs/api-reference/vector-stores/object) + object. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + -d '{ + "name": "Support FAQ" + }' + python: | + from openai import OpenAI + client = OpenAI() + + vector_store = client.beta.vector_stores.update( + vector_store_id="vs_abc123", + name="Support FAQ" + ) + print(vector_store) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStore = await openai.beta.vectorStores.update( + "vs_abc123", + { + name: "Support FAQ" + } + ); + console.log(vectorStore); + } + + main(); + response: | + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + } + delete: + operationId: deleteVectorStore + tags: + - Vector stores + summary: Delete a vector store. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + description: The ID of the vector store to delete. 
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/DeleteVectorStoreResponse"
+      x-oaiMeta:
+        name: Delete vector store
+        group: vector_stores
+        beta: true
+        returns: Deletion status
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -X DELETE
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              deleted_vector_store = client.beta.vector_stores.delete(
+                vector_store_id="vs_abc123"
+              )
+              print(deleted_vector_store)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const deletedVectorStore = await openai.beta.vectorStores.del(
+                  "vs_abc123"
+                );
+                console.log(deletedVectorStore);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vs_abc123",
+              "object": "vector_store.deleted",
+              "deleted": true
+            }
+  /vector_stores/{vector_store_id}/file_batches:
+    post:
+      operationId: createVectorStoreFileBatch
+      tags:
+        - Vector stores
+      summary: Create a vector store file batch.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+            example: vs_abc123
+          description: |
+            The ID of the vector store for which to create a File Batch.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/CreateVectorStoreFileBatchRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Create vector store file batch
+        group: vector_stores
+        beta: true
+        returns: A [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -d '{
+                  "file_ids": ["file-abc123", "file-abc456"]
+                }'
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store_file_batch = client.beta.vector_stores.file_batches.create(
+                vector_store_id="vs_abc123",
+                file_ids=["file-abc123", "file-abc456"]
+              )
+              print(vector_store_file_batch)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(
+                  "vs_abc123",
+                  {
+                    file_ids: ["file-abc123", "file-abc456"]
+                  }
+                );
+                console.log(myVectorStoreFileBatch);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "in_progress",
+              "file_counts": {
+                "in_progress": 1,
+                "completed": 1,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 2
+              }
+            }
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}:
+    get:
+      operationId: getVectorStoreFileBatch
+      tags:
+        - Vector stores
+      summary: Retrieves a vector store file batch.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+            example: vs_abc123
+          description: The ID of the vector store that the file batch belongs to.
+        - in: path
+          name: batch_id
+          required: true
+          schema:
+            type: string
+            example: vsfb_abc123
+          description: The ID of the file batch being retrieved.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Retrieve vector store file batch
+        group: vector_stores
+        beta: true
+        returns: The [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store_file_batch = client.beta.vector_stores.file_batches.retrieve(
+                vector_store_id="vs_abc123",
+                batch_id="vsfb_abc123"
+              )
+              print(vector_store_file_batch)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(
+                  "vs_abc123",
+                  "vsfb_abc123"
+                );
+                console.log(vectorStoreFileBatch);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "in_progress",
+              "file_counts": {
+                "in_progress": 1,
+                "completed": 1,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 2
+              }
+            }
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel:
+    post:
+      operationId: cancelVectorStoreFileBatch
+      tags:
+        - Vector stores
+      summary: Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the vector store that the file batch belongs to.
+        - in: path
+          name: batch_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the file batch to cancel.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Cancel vector store file batch
+        group: vector_stores
+        beta: true
+        returns: The modified vector store file batch object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/cancel \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -X POST
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              deleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel(
+                vector_store_id="vs_abc123",
+                batch_id="vsfb_abc123"
+              )
+              print(deleted_vector_store_file_batch)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const deletedVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.cancel(
+                  "vs_abc123",
+                  "vsfb_abc123"
+                );
+                console.log(deletedVectorStoreFileBatch);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "cancelling",
+              "file_counts": {
+                "in_progress": 12,
+                "completed": 3,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 15
+              }
+            }
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}/files:
+    get:
+      operationId: listFilesInVectorStoreBatch
+      tags:
+        - Vector stores
+      summary: Returns a list of vector store files in a batch.
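+      # The batch endpoints above are typically used together: create a batch,
+      # poll it until it leaves `in_progress`, then list the files it contains.
+      # A minimal sketch using the SDK calls shown in the examples; the IDs and
+      # polling interval are placeholders, not part of this spec.
+      #
+      #   import time
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   batch = client.beta.vector_stores.file_batches.create(
+      #       vector_store_id="vs_abc123",
+      #       file_ids=["file-abc123", "file-abc456"],
+      #   )
+      #   # Poll until the batch reaches a terminal status.
+      #   while batch.status == "in_progress":
+      #       time.sleep(1)
+      #       batch = client.beta.vector_stores.file_batches.retrieve(
+      #           vector_store_id="vs_abc123",
+      #           batch_id=batch.id,
+      #       )
+      #   files = client.beta.vector_stores.file_batches.list_files(
+      #       vector_store_id="vs_abc123",
+      #       batch_id=batch.id,
+      #   )
+      #   print(batch.status, [f.id for f in files.data])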
+ parameters: + - name: vector_store_id + in: path + description: The ID of the vector store that the files belong to. + required: true + schema: + type: string + - name: batch_id + in: path + description: The ID of the file batch that the files belong to. + required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + - name: filter + in: query + description: Filter by file status. One of `in_progress`, `completed`, `failed`, + `cancelled`. + schema: + type: string + enum: + - in_progress + - completed + - failed + - cancelled + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListVectorStoreFilesResponse" + x-oaiMeta: + name: List vector store files in a batch + group: vector_stores + beta: true + returns: A list of [vector store + file](/docs/api-reference/vector-stores-files/file-object) objects. + examples: + request: + curl: > + curl + https://api.openai.com/v1/vector_stores/vs_abc123/files_batches/vsfb_abc123/files + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: > + from openai import OpenAI + + client = OpenAI() + + + vector_store_files = + client.beta.vector_stores.file_batches.list_files( + vector_store_id="vs_abc123", + batch_id="vsfb_abc123" + ) + + print(vector_store_files) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + async function main() { + const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles( + "vs_abc123", + "vsfb_abc123" + ); + console.log(vectorStoreFiles); + } + + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abc123" + }, + { + "id": "file-abc456", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abc123" + } + ], + "first_id": "file-abc123", + "last_id": "file-abc456", + "has_more": false + } + /vector_stores/{vector_store_id}/files: + get: + operationId: listVectorStoreFiles + tags: + - Vector stores + summary: Returns a list of vector store files. + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store that the files belong to. 
+ required: true + schema: + type: string + - name: limit + in: query + description: > + A limit on the number of objects to be returned. Limit can range + between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: > + Sort order by the `created_at` timestamp of the objects. `asc` for + ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: + - asc + - desc + - name: after + in: query + description: > + A cursor for use in pagination. `after` is an object ID that defines + your place in the list. For instance, if you make a list request and + receive 100 objects, ending with obj_foo, your subsequent call can + include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: > + A cursor for use in pagination. `before` is an object ID that + defines your place in the list. For instance, if you make a list + request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the + previous page of the list. + schema: + type: string + - name: filter + in: query + description: Filter by file status. One of `in_progress`, `completed`, `failed`, + `cancelled`. + schema: + type: string + enum: + - in_progress + - completed + - failed + - cancelled + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListVectorStoreFilesResponse" + x-oaiMeta: + name: List vector store files + group: vector_stores + beta: true + returns: A list of [vector store + file](/docs/api-reference/vector-stores-files/file-object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_store_files = client.beta.vector_stores.files.list( + vector_store_id="vs_abc123" + ) + print(vector_store_files) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + async function main() { + const vectorStoreFiles = await openai.beta.vectorStores.files.list( + "vs_abc123" + ); + console.log(vectorStoreFiles); + } + + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abc123" + }, + { + "id": "file-abc456", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abc123" + } + ], + "first_id": "file-abc123", + "last_id": "file-abc456", + "has_more": false + } + post: + operationId: createVectorStoreFile + tags: + - Vector stores + summary: Create a vector store file by attaching a + [File](/docs/api-reference/files) to a [vector + store](/docs/api-reference/vector-stores/object). + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + example: vs_abc123 + description: | + The ID of the vector store for which to create a File. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateVectorStoreFileRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreFileObject" + x-oaiMeta: + name: Create vector store file + group: vector_stores + beta: true + returns: A [vector store + file](/docs/api-reference/vector-stores-files/file-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "file_id": "file-abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + vector_store_file = client.beta.vector_stores.files.create( + vector_store_id="vs_abc123", + file_id="file-abc123" + ) + print(vector_store_file) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + async function main() { + const myVectorStoreFile = await openai.beta.vectorStores.files.create( + "vs_abc123", + { + file_id: "file-abc123" + } + ); + console.log(myVectorStoreFile); + } + + + main(); + response: | + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "usage_bytes": 1234, + "vector_store_id": "vs_abcd", + "status": "completed", + "last_error": null + } + /vector_stores/{vector_store_id}/files/{file_id}: + get: + operationId: getVectorStoreFile + tags: + - Vector stores + summary: Retrieves a vector store file. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + example: vs_abc123 + description: The ID of the vector store that the file belongs to. + - in: path + name: file_id + required: true + schema: + type: string + example: file-abc123 + description: The ID of the file being retrieved. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreFileObject" + x-oaiMeta: + name: Retrieve vector store file + group: vector_stores + beta: true + returns: The [vector store + file](/docs/api-reference/vector-stores-files/file-object) object. + examples: + request: + curl: > + curl + https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 + \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_store_file = client.beta.vector_stores.files.retrieve( + vector_store_id="vs_abc123", + file_id="file-abc123" + ) + print(vector_store_file) + node.js: > + import OpenAI from "openai"; + + const openai = new OpenAI(); + + + async function main() { + const vectorStoreFile = await openai.beta.vectorStores.files.retrieve( + "vs_abc123", + "file-abc123" + ); + console.log(vectorStoreFile); + } + + + main(); + response: | + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abcd", + "status": "completed", + "last_error": null + } + delete: + operationId: deleteVectorStoreFile + tags: + - Vector stores + summary: Delete a vector store file. This will remove the file from the vector + store but the file itself will not be deleted. To delete the file, use + the [delete file](/docs/api-reference/files/delete) endpoint. 
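+      # As noted above, deleting a vector store file only detaches it from the
+      # vector store; the underlying File object remains. A minimal sketch of
+      # doing both, assuming the SDK calls shown elsewhere in this spec and
+      # placeholder IDs:
+      #
+      #   from openai import OpenAI
+      #
+      #   client = OpenAI()
+      #   # Detach the file from the vector store.
+      #   client.beta.vector_stores.files.delete(
+      #       vector_store_id="vs_abc123",
+      #       file_id="file-abc123",
+      #   )
+      #   # Optionally delete the File object itself via the files endpoint.
+      #   client.files.delete("file-abc123")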
+ parameters:
+ - in: path
+ name: vector_store_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the vector store that the file belongs to.
+ - in: path
+ name: file_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the file to delete.
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/DeleteVectorStoreFileResponse"
+ x-oaiMeta:
+ name: Delete vector store file
+ group: vector_stores
+ beta: true
+ returns: Deletion status
+ examples:
+ request:
+ curl: >
+ curl
+ https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123
+ \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -H "OpenAI-Beta: assistants=v2" \
+ -X DELETE
+ python: >
+ from openai import OpenAI
+
+ client = OpenAI()
+
+
+ deleted_vector_store_file =
+ client.beta.vector_stores.files.delete(
+ vector_store_id="vs_abc123",
+ file_id="file-abc123"
+ )
+
+ print(deleted_vector_store_file)
+ node.js: >
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+
+ async function main() {
+ const deletedVectorStoreFile = await openai.beta.vectorStores.files.del(
+ "vs_abc123",
+ "file-abc123"
+ );
+ console.log(deletedVectorStoreFile);
+ }
+
+
+ main();
+ response: |
+ {
+ "id": "file-abc123",
+ "object": "vector_store.file.deleted",
+ "deleted": true
+ }
+components:
+ schemas:
+ AddUploadPartRequest:
+ type: object
+ additionalProperties: false
+ properties:
+ data:
+ description: |
+ The chunk of bytes for this Part.
+ type: string
+ format: binary
+ required:
+ - data
+ AssistantObject:
+ type: object
+ title: Assistant
+ description: Represents an `assistant` that can call the model and use tools.
+ properties:
+ id:
+ description: The identifier, which can be referenced in API endpoints.
+ type: string
+ object:
+ description: The object type, which is always `assistant`.
+ type: string
+ enum:
+ - assistant
+ created_at:
+ description: The Unix timestamp (in seconds) for when the assistant was created.
+ type: integer
+ name:
+ description: |
+ The name of the assistant. The maximum length is 256 characters.
+ type: string
+ maxLength: 256
+ nullable: true
+ description:
+ description: >
+ The description of the assistant. The maximum length is 512
+ characters.
+ type: string
+ maxLength: 512
+ nullable: true
+ model:
+ description: >
+ ID of the model to use. You can use the [List
+ models](/docs/api-reference/models/list) API to see all of your
+ available models, or see our [Model overview](/docs/models) for
+ descriptions of them.
+ type: string
+ instructions:
+ description: >
+ The system instructions that the assistant uses. The maximum length
+ is 256,000 characters.
+ type: string
+ maxLength: 256000
+ nullable: true
+ tools:
+ description: >
+ A list of tools enabled on the assistant. There can be a maximum of
+ 128 tools per assistant. Tools can be of types `code_interpreter`,
+ `file_search`, or `function`.
+ default: []
+ type: array
+ maxItems: 128
+ items:
 oneOf:
- - $ref: '#/components/schemas/AutoChunkingStrategyRequestParam'
- - $ref: '#/components/schemas/StaticChunkingStrategyRequestParam'
- description: 'The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy.'
+ - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" x-oaiExpandable: true - CreateVectorStoreFileRequest: - required: - - file_id - type: object - properties: - file_id: - type: string - description: 'A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.' - chunking_strategy: - $ref: '#/components/schemas/ChunkingStrategyRequestParam' - additionalProperties: false - ListVectorStoreFilesResponse: - required: - - object - - data - - first_id - - last_id - - has_more - properties: - object: - type: string - example: list - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreFileObject' - first_id: - type: string - example: file-abc123 - last_id: - type: string - example: file-abc456 - has_more: - type: boolean - example: false - DeleteVectorStoreFileResponse: - required: - - id - - object - - deleted - type: object - properties: - id: - type: string - deleted: - type: boolean - object: - enum: - - vector_store.file.deleted - type: string - VectorStoreFileBatchObject: - title: Vector store file batch - required: - - id - - object - - created_at - - vector_store_id - - status - - file_counts - type: object - properties: - id: - type: string - description: 'The identifier, which can be referenced in API endpoints.' - object: - enum: - - vector_store.files_batch - type: string - description: 'The object type, which is always `vector_store.file_batch`.' - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the vector store files batch was created. - vector_store_id: - type: string - description: 'The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.' - status: - enum: - - in_progress - - completed - - cancelled - - failed - type: string - description: 'The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`.' - file_counts: - required: - - in_progress - - completed - - cancelled - - failed - - total - type: object - properties: - in_progress: - type: integer - description: The number of files that are currently being processed. - completed: - type: integer - description: The number of files that have been processed. - failed: - type: integer - description: The number of files that have failed to process. - cancelled: - type: integer - description: The number of files that where cancelled. - total: - type: integer - description: The total number of files. - description: A batch of files attached to a vector store. - x-oaiMeta: - name: The vector store files batch object - beta: true - example: "{\n \"id\": \"vsfb_123\",\n \"object\": \"vector_store.files_batch\",\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 100\n }\n}\n" - CreateVectorStoreFileBatchRequest: - required: - - file_ids - type: object - properties: + tool_resources: + type: object + description: > + A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the + `file_search` tool requires a list of vector store IDs. 
+ properties: + code_interpreter: + type: object + properties: file_ids: - maxItems: 500 - minItems: 1 - type: array - items: - type: string - description: 'A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.' - chunking_strategy: - $ref: '#/components/schemas/ChunkingStrategyRequestParam' - additionalProperties: false - AssistantStreamEvent: - oneOf: - - $ref: '#/components/schemas/ThreadStreamEvent' - - $ref: '#/components/schemas/RunStreamEvent' - - $ref: '#/components/schemas/RunStepStreamEvent' - - $ref: '#/components/schemas/MessageStreamEvent' - - $ref: '#/components/schemas/ErrorEvent' - - $ref: '#/components/schemas/DoneEvent' - description: "Represents an event emitted when streaming a Run.\n\nEach event in a server-sent events stream has an `event` and `data` property:\n\n```\nevent: thread.created\ndata: {\"id\": \"thread_123\", \"object\": \"thread\", ...}\n```\n\nWe emit events whenever a new object is created, transitions to a new state, or is being\nstreamed in parts (deltas). For example, we emit `thread.run.created` when a new run\nis created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses\nto create a message during a run, we emit a `thread.message.created event`, a\n`thread.message.in_progress` event, many `thread.message.delta` events, and finally a\n`thread.message.completed` event.\n\nWe may add additional events over time, so we recommend handling unknown events gracefully\nin your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to\nintegrate the Assistants API with streaming.\n" - x-oaiMeta: - name: Assistant stream events - beta: true - ThreadStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.created - type: string - data: - $ref: '#/components/schemas/ThreadObject' - description: 'Occurs when a new [thread](/docs/api-reference/threads/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [thread](/docs/api-reference/threads/object)' - RunStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.created - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a new [run](/docs/api-reference/runs/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.queued - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.in_progress - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.requires_action - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status.' 
- x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.completed - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.incomplete - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.failed - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) fails.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.cancelling - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.cancelled - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) is cancelled.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.expired - type: string - data: - $ref: '#/components/schemas/RunObject' - description: 'Occurs when a [run](/docs/api-reference/runs/object) expires.' - x-oaiMeta: - dataDescription: '`data` is a [run](/docs/api-reference/runs/object)' - RunStepStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.created - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.in_progress - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.delta - type: string - data: - $ref: '#/components/schemas/RunStepDeltaObject' - description: 'Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed.' 
- x-oaiMeta: - dataDescription: '`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.completed - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.failed - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) fails.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.cancelled - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.run.step.expired - type: string - data: - $ref: '#/components/schemas/RunStepObject' - description: 'Occurs when a [run step](/docs/api-reference/runs/step-object) expires.' - x-oaiMeta: - dataDescription: '`data` is a [run step](/docs/api-reference/runs/step-object)' - MessageStreamEvent: - oneOf: - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.created - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) is created.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.in_progress - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.delta - type: string - data: - $ref: '#/components/schemas/MessageDeltaObject' - description: 'Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed.' - x-oaiMeta: - dataDescription: '`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)' - - required: - - event - - data - type: object - properties: - event: - enum: - - thread.message.completed - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) is completed.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - - required: - - event - - data + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter`` tool. There can be a + maximum of 20 files associated with the tool. 
+ default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The ID of the [vector + store](/docs/api-reference/vector-stores/object) attached to + this assistant. There can be a maximum of 1 vector store + attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - metadata + x-oaiMeta: + name: The assistant object + beta: true + example: > + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698984975, + "name": "Math Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + AssistantStreamEvent: + description: > + Represents an event emitted when streaming a Run. + + + Each event in a server-sent events stream has an `event` and `data` + property: + + + ``` + + event: thread.created + + data: {"id": "thread_123", "object": "thread", ...} + + ``` + + + We emit events whenever a new object is created, transitions to a new + state, or is being + + streamed in parts (deltas). For example, we emit `thread.run.created` + when a new run + + is created, `thread.run.completed` when a run completes, and so on. When + an Assistant chooses + + to create a message during a run, we emit a `thread.message.created + event`, a + + `thread.message.in_progress` event, many `thread.message.delta` events, + and finally a + + `thread.message.completed` event. + + + We may add additional events over time, so we recommend handling unknown + events gracefully + + in your code. See the [Assistants API + quickstart](/docs/assistants/overview) to learn how to + + integrate the Assistants API with streaming. 
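+ # Illustrative sketch (comment only, not part of the schema): one way to
+ # consume these events, assuming the Python SDK's streaming helper and
+ # placeholder thread/assistant IDs; unhandled event types are ignored so
+ # events added later are handled gracefully, as recommended above.
+ #
+ #   from openai import OpenAI
+ #   client = OpenAI()
+ #   with client.beta.threads.runs.stream(
+ #       thread_id="thread_123", assistant_id="asst_123"
+ #   ) as stream:
+ #       for event in stream:
+ #           if event.event == "thread.message.delta":
+ #               print(event.data.delta)
+ #           # other event types (see oneOf below) can be handled here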
+ oneOf: + - $ref: "#/components/schemas/ThreadStreamEvent" + - $ref: "#/components/schemas/RunStreamEvent" + - $ref: "#/components/schemas/RunStepStreamEvent" + - $ref: "#/components/schemas/MessageStreamEvent" + - $ref: "#/components/schemas/ErrorEvent" + - $ref: "#/components/schemas/DoneEvent" + x-oaiMeta: + name: Assistant stream events + beta: true + AssistantToolsCode: + type: object + title: Code interpreter tool + properties: + type: + type: string + description: "The type of tool being defined: `code_interpreter`" + enum: + - code_interpreter + required: + - type + AssistantToolsFileSearch: + type: object + title: FileSearch tool + properties: + type: + type: string + description: "The type of tool being defined: `file_search`" + enum: + - file_search + file_search: + type: object + description: Overrides for the file search tool. + properties: + max_num_results: + type: integer + minimum: 1 + maximum: 50 + description: > + The maximum number of results the file search tool should + output. The default is 20 for `gpt-4*` models and 5 for + `gpt-3.5-turbo`. This number should be between 1 and 50 + inclusive. + + + Note that the file search tool may output fewer than + `max_num_results` results. See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + ranking_options: + $ref: "#/components/schemas/FileSearchRankingOptions" + required: + - type + AssistantToolsFileSearchTypeOnly: + type: object + title: FileSearch tool + properties: + type: + type: string + description: "The type of tool being defined: `file_search`" + enum: + - file_search + required: + - type + AssistantToolsFunction: + type: object + title: Function tool + properties: + type: + type: string + description: "The type of tool being defined: `function`" + enum: + - function + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + AssistantsApiResponseFormatOption: + description: > + Specifies the format that the model must output. Compatible with + [GPT-4o](/docs/models#gpt-4o), [GPT-4 + Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models + since `gpt-3.5-turbo-1106`. + + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables + Structured Outputs which ensures the model will match your supplied JSON + schema. Learn more in the [Structured Outputs + guide](/docs/guides/structured-outputs). + + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures + the message the model generates is valid JSON. + + + **Important:** when using JSON mode, you **must** also instruct the + model to produce JSON yourself via a system or user message. Without + this, the model may generate an unending stream of whitespace until the + generation reaches the token limit, resulting in a long-running and + seemingly "stuck" request. Also note that the message content may be + partially cut off if `finish_reason="length"`, which indicates the + generation exceeded `max_tokens` or the conversation exceeded the max + context length. + oneOf: + - type: string + description: | + `auto` is the default value + enum: + - auto + - $ref: "#/components/schemas/ResponseFormatText" + - $ref: "#/components/schemas/ResponseFormatJsonObject" + - $ref: "#/components/schemas/ResponseFormatJsonSchema" + x-oaiExpandable: true + AssistantsApiToolChoiceOption: + description: > + Controls which (if any) tool is called by the model. 
+ + `none` means the model will not call any tools and instead generates a + message. + + `auto` is the default value and means the model can pick between + generating a message or calling one or more tools. + + `required` means the model must call one or more tools before responding + to the user. + + Specifying a particular tool like `{"type": "file_search"}` or `{"type": + "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + oneOf: + - type: string + description: > + `none` means the model will not call any tools and instead generates + a message. `auto` means the model can pick between generating a + message or calling one or more tools. `required` means the model + must call one or more tools before responding to the user. + enum: + - none + - auto + - required + - $ref: "#/components/schemas/AssistantsNamedToolChoice" + x-oaiExpandable: true + AssistantsNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to + call a specific tool. + properties: + type: + type: string + enum: + - function + - code_interpreter + - file_search + description: The type of the tool. If type is `function`, the function name must + be set + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + AudioResponseFormat: + description: > + The format of the output, in one of these options: `json`, `text`, + `srt`, `verbose_json`, or `vtt`. + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + AuditLog: + type: object + description: A log of a user action or configuration change within this organization. + properties: + id: + type: string + description: The ID of this log. + type: + $ref: "#/components/schemas/AuditLogEventType" + effective_at: + type: integer + description: The Unix timestamp (in seconds) of the event. + project: + type: object + description: The project that the action was scoped to. Absent for actions not + scoped to projects. + properties: + id: + type: string + description: The project ID. + name: + type: string + description: The project title. + actor: + $ref: "#/components/schemas/AuditLogActor" + api_key.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + data: + type: object + description: The payload used to create the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. + `["api.model.request"]` + api_key.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + changes_requested: + type: object + description: The payload used to update the API key. + properties: + scopes: + type: array + items: + type: string + description: A list of scopes allowed for the API key, e.g. + `["api.model.request"]` + api_key.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The tracking ID of the API key. + invite.sent: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + data: + type: object + description: The payload used to create the invite. 
+ properties: + email: + type: string + description: The email invited to the organization. + role: + type: string + description: The role the email was invited to be. Is either `owner` or + `member`. + invite.accepted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + invite.deleted: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The ID of the invite. + login.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + logout.failed: + type: object + description: The details for events with this `type`. + properties: + error_code: + type: string + description: The error code of the failure. + error_message: + type: string + description: The error message of the failure. + organization.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The organization ID. + changes_requested: + type: object + description: The payload used to update the organization settings. + properties: + title: + type: string + description: The organization title. + description: + type: string + description: The organization description. + name: + type: string + description: The organization name. + settings: type: object properties: - event: - enum: - - thread.message.incomplete - type: string - data: - $ref: '#/components/schemas/MessageObject' - description: 'Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed.' - x-oaiMeta: - dataDescription: '`data` is a [message](/docs/api-reference/messages/object)' - ErrorEvent: - required: - - event - - data - type: object - properties: - event: - enum: - - error - type: string - data: - $ref: '#/components/schemas/Error' - description: 'Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout.' - x-oaiMeta: - dataDescription: '`data` is an [error](/docs/guides/error-codes/api-errors)' - DoneEvent: - required: - - event - - data - type: object - properties: - event: - enum: - - done - type: string - data: - enum: - - '[DONE]' - type: string - description: Occurs when a stream ends. - x-oaiMeta: - dataDescription: '`data` is `[DONE]`' - Batch: - required: - - id - - object - - endpoint - - input_file_id - - completion_window - - status - - created_at - type: object - properties: - id: - type: string - object: - enum: - - batch - type: string - description: 'The object type, which is always `batch`.' - endpoint: - type: string - description: The OpenAI API endpoint used by the batch. - errors: - type: object - properties: - object: - type: string - description: 'The object type, which is always `list`.' - data: - type: array - items: - type: object - properties: - code: - type: string - description: An error code identifying the error type. - message: - type: string - description: A human-readable message providing more details about the error. - param: - type: string - description: 'The name of the parameter that caused the error, if applicable.' - nullable: true - line: - type: integer - description: 'The line number of the input file where the error occurred, if applicable.' 
- nullable: true - input_file_id: - type: string - description: The ID of the input file for the batch. - completion_window: - type: string - description: The time frame within which the batch should be processed. - status: - enum: - - validating - - failed - - in_progress - - finalizing - - completed - - expired - - cancelling - - cancelled - type: string - description: The current status of the batch. - output_file_id: - type: string - description: The ID of the file containing the outputs of successfully executed requests. - error_file_id: - type: string - description: The ID of the file containing the outputs of requests with errors. - created_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was created. - in_progress_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started processing. - expires_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch will expire. - finalizing_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started finalizing. - completed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was completed. - failed_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch failed. - expired_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch expired. - cancelling_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch started cancelling. - cancelled_at: - type: integer - description: The Unix timestamp (in seconds) for when the batch was cancelled. - request_counts: - required: - - total - - completed - - failed - type: object - properties: - total: - type: integer - description: Total number of requests in the batch. - completed: - type: integer - description: Number of requests that have been completed successfully. - failed: - type: integer - description: Number of requests that have failed. - description: The request counts for different statuses within the batch. - metadata: - type: object - description: "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.\n" - nullable: true - x-oaiTypeLabel: map - x-oaiMeta: - name: The batch object - example: "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" - BatchRequestInput: - type: object - properties: - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. - method: - enum: - - POST - type: string - description: The HTTP method to be used for the request. 
Currently only `POST` is supported. - url: - type: string - description: 'The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.' - description: The per-line object of the batch input file - x-oaiMeta: - name: The request input object - example: "{\"custom_id\": \"request-1\", \"method\": \"POST\", \"url\": \"/v1/chat/completions\", \"body\": {\"model\": \"gpt-4o-mini\", \"messages\": [{\"role\": \"system\", \"content\": \"You are a helpful assistant.\"}, {\"role\": \"user\", \"content\": \"What is 2+2?\"}]}}\n" - BatchRequestOutput: - type: object - properties: - id: - type: string - custom_id: - type: string - description: A developer-provided per-request id that will be used to match outputs to inputs. - response: - type: object - properties: - status_code: - type: integer - description: The HTTP status code of the response - request_id: - type: string - description: An unique identifier for the OpenAI API request. Please include this request ID when contacting support. - body: - type: object - description: The JSON body of the response - x-oaiTypeLabel: map - nullable: true - error: - type: object - properties: - code: - type: string - description: A machine-readable error code. - message: - type: string - description: A human-readable error message. - description: 'For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.' + threads_ui_visibility: + type: string + description: Visibility of the threads page which shows messages created with + the Assistants API and Playground. One of `ANY_ROLE`, + `OWNERS`, or `NONE`. + usage_dashboard_visibility: + type: string + description: Visibility of the usage dashboard which shows activity and costs + for your organization. One of `ANY_ROLE` or `OWNERS`. + project.created: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + data: + type: object + description: The payload used to create the project. + properties: + name: + type: string + description: The project name. + title: + type: string + description: The title of the project as seen on the dashboard. + project.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + changes_requested: + type: object + description: The payload used to update the project. + properties: + title: + type: string + description: The title of the project as seen on the dashboard. + project.archived: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The project ID. + rate_limit.updated: + type: object + description: The details for events with this `type`. + properties: + id: + type: string + description: The rate limit ID + changes_requested: + type: object + description: The payload used to update the rate limits. + properties: + max_requests_per_1_minute: + type: integer + description: The maximum requests per minute. + max_tokens_per_1_minute: + type: integer + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only relevant for certain models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only relevant for certain + models. 
+ max_requests_per_1_day:
+ type: integer
+ description: The maximum requests per day. Only relevant for certain models.
+ batch_1_day_max_input_tokens:
+ type: integer
+ description: The maximum batch input tokens per day. Only relevant for certain
+ models.
+ rate_limit.deleted:
+ type: object
+ description: The details for events with this `type`.
+ properties:
+ id:
+ type: string
+ description: The rate limit ID
+ service_account.created:
+ type: object
+ description: The details for events with this `type`.
+ properties:
+ id:
+ type: string
+ description: The service account ID.
+ data:
+ type: object
+ description: The payload used to create the service account.
+ properties:
+ role:
+ type: string
+ description: The role of the service account. Is either `owner` or `member`.
+ service_account.updated:
+ type: object
+ description: The details for events with this `type`.
+ properties:
+ id:
+ type: string
+ description: The service account ID.
+ changes_requested:
+ type: object
+ description: The payload used to update the service account.
+ properties:
+ role:
+ type: string
+ description: The role of the service account. Is either `owner` or `member`.
+ service_account.deleted:
+ type: object
+ description: The details for events with this `type`.
+ properties:
+ id:
+ type: string
+ description: The service account ID.
+ user.added:
+ type: object
+ description: The details for events with this `type`.
+ properties:
+ id:
+ type: string
+ description: The user ID.
+ data:
+ type: object
+ description: The payload used to add the user to the project.
+ properties:
+ role:
+ type: string
+ description: The role of the user. Is either `owner` or `member`.
+ user.updated:
+ type: object
+ description: The details for events with this `type`.
+ properties:
+ id:
+ type: string
+ description: The project ID.
+ changes_requested:
+ type: object
+ description: The payload used to update the user.
+ properties:
+ role:
+ type: string
+ description: The role of the user. Is either `owner` or `member`.
+ user.deleted:
+ type: object
+ description: The details for events with this `type`.
+ properties:
+ id:
+ type: string
+ description: The user ID.
+ required:
+ - id
+ - type
+ - effective_at
+ - actor
+ x-oaiMeta:
+ name: The audit log object
+ example: >
+ {
+ "id": "req_xxx_20240101",
+ "type": "api_key.created",
+ "effective_at": 1720804090,
+ "actor": {
+ "type": "session",
+ "session": {
+ "user": {
+ "id": "user-xxx",
+ "email": "user@example.com"
+ },
+ "ip_address": "127.0.0.1",
+ "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
+ }
+ },
+ "api_key.created": {
+ "id": "key_xxxx",
+ "data": {
+ "scopes": ["resource.operation"]
+ }
+ }
+ }
+ AuditLogActor:
+ type: object
+ description: The actor who performed the audit logged action.
+ properties:
+ type:
+ type: string
+ description: The type of actor. Is either `session` or `api_key`.
+ enum:
+ - session
+ - api_key
+ session:
+ type: object
+ $ref: "#/components/schemas/AuditLogActorSession"
+ api_key:
+ type: object
+ $ref: "#/components/schemas/AuditLogActorApiKey"
+ AuditLogActorApiKey:
+ type: object
+ description: The API Key used to perform the audit logged action.
+ properties:
+ id:
+ type: string
+ description: The tracking id of the API key.
+ type:
+ type: string
+ description: The type of API key. Can be either `user` or `service_account`.
+ enum: + - user + - service_account + user: + $ref: "#/components/schemas/AuditLogActorUser" + service_account: + $ref: "#/components/schemas/AuditLogActorServiceAccount" + AuditLogActorServiceAccount: + type: object + description: The service account that performed the audit logged action. + properties: + id: + type: string + description: The service account id. + AuditLogActorSession: + type: object + description: The session in which the audit logged action was performed. + properties: + user: + $ref: "#/components/schemas/AuditLogActorUser" + ip_address: + type: string + description: The IP address from which the action was performed. + AuditLogActorUser: + type: object + description: The user who performed the audit logged action. + properties: + id: + type: string + description: The user id. + email: + type: string + description: The user email. + AuditLogEventType: + type: string + description: The event type. + x-oaiExpandable: true + enum: + - api_key.created + - api_key.updated + - api_key.deleted + - invite.sent + - invite.accepted + - invite.deleted + - login.succeeded + - login.failed + - logout.succeeded + - logout.failed + - organization.updated + - project.created + - project.updated + - project.archived + - service_account.created + - service_account.updated + - service_account.deleted + - rate_limit.updated + - rate_limit.deleted + - user.added + - user.updated + - user.deleted + AutoChunkingStrategyRequestParam: + type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a + `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. + enum: + - auto + required: + - type + Batch: + type: object + properties: + id: + type: string + object: + type: string + enum: + - batch + description: The object type, which is always `batch`. + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + errors: + type: object + properties: + object: + type: string + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. + message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: The name of the parameter that caused the error, if applicable. 
nullable: true - description: The per-line object of the batch output and error files - x-oaiMeta: - name: The request output object - example: "{\"id\": \"batch_req_wnaDys\", \"custom_id\": \"request-2\", \"response\": {\"status_code\": 200, \"request_id\": \"req_c187b3\", \"body\": {\"id\": \"chatcmpl-9758Iw\", \"object\": \"chat.completion\", \"created\": 1711475054, \"model\": \"gpt-4o-mini\", \"choices\": [{\"index\": 0, \"message\": {\"role\": \"assistant\", \"content\": \"2 + 2 equals 4.\"}, \"finish_reason\": \"stop\"}], \"usage\": {\"prompt_tokens\": 24, \"completion_tokens\": 15, \"total_tokens\": 39}, \"system_fingerprint\": null}}, \"error\": null}\n" - ListBatchesResponse: - required: - - object - - data - - has_more - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Batch' - first_id: - type: string - example: batch_abc123 - last_id: - type: string - example: batch_abc456 - has_more: - type: boolean - object: - enum: - - list - type: string - AuditLogActorServiceAccount: - type: object - properties: - id: - type: string - description: The service account id. - description: The service account that performed the audit logged action. - AuditLogActorUser: - type: object - properties: - id: - type: string - description: The user id. - email: - type: string - description: The user email. - description: The user who performed the audit logged action. - AuditLogActorApiKey: - type: object - properties: - id: - type: string - description: The tracking id of the API key. - type: - enum: - - user - - service_account - type: string - description: The type of API key. Can be either `user` or `service_account`. - user: - $ref: '#/components/schemas/AuditLogActorUser' - service_account: - $ref: '#/components/schemas/AuditLogActorServiceAccount' - description: The API Key used to perform the audit logged action. - AuditLogActorSession: - type: object - properties: - user: - $ref: '#/components/schemas/AuditLogActorUser' - ip_address: - type: string - description: The IP address from which the action was performed. - description: The session in which the audit logged action was performed. - AuditLogActor: + line: + type: integer + description: The line number of the input file where the error occurred, if + applicable. + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + type: string + description: The current status of the batch. + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed + requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started + processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started + finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. 
+ failed_at:
+ type: integer
+ description: The Unix timestamp (in seconds) for when the batch failed.
+ expired_at:
+ type: integer
+ description: The Unix timestamp (in seconds) for when the batch expired.
+ cancelling_at:
+ type: integer
+ description: The Unix timestamp (in seconds) for when the batch started
+ cancelling.
+ cancelled_at:
+ type: integer
+ description: The Unix timestamp (in seconds) for when the batch was cancelled.
+ request_counts:
+ type: object
+ properties:
+ total:
+ type: integer
+ description: Total number of requests in the batch.
+ completed:
+ type: integer
+ description: Number of requests that have been completed successfully.
+ failed:
+ type: integer
+ description: Number of requests that have failed.
+ required:
+ - total
+ - completed
+ - failed
+ description: The request counts for different statuses within the batch.
+ metadata:
+ description: >
+ Set of 16 key-value pairs that can be attached to an object. This
+ can be useful for storing additional information about the object in
+ a structured format. Keys can be a maximum of 64 characters long and
+ values can be a maximum of 512 characters long.
+ type: object
+ x-oaiTypeLabel: map
+ nullable: true
+ required:
+ - id
+ - object
+ - endpoint
+ - input_file_id
+ - completion_window
+ - status
+ - created_at
+ x-oaiMeta:
+ name: The batch object
+ example: |
+ {
+ "id": "batch_abc123",
+ "object": "batch",
+ "endpoint": "/v1/completions",
+ "errors": null,
+ "input_file_id": "file-abc123",
+ "completion_window": "24h",
+ "status": "completed",
+ "output_file_id": "file-cvaTdG",
+ "error_file_id": "file-HOWS94",
+ "created_at": 1711471533,
+ "in_progress_at": 1711471538,
+ "expires_at": 1711557933,
+ "finalizing_at": 1711493133,
+ "completed_at": 1711493163,
+ "failed_at": null,
+ "expired_at": null,
+ "cancelling_at": null,
+ "cancelled_at": null,
+ "request_counts": {
+ "total": 100,
+ "completed": 95,
+ "failed": 5
+ },
+ "metadata": {
+ "customer_id": "user_123456789",
+ "batch_description": "Nightly eval job"
+ }
+ }
+ BatchRequestInput:
+ type: object
+ description: The per-line object of the batch input file
+ properties:
+ custom_id:
+ type: string
+ description: A developer-provided per-request id that will be used to match
+ outputs to inputs. Must be unique for each request in a batch.
+ method:
+ type: string
+ enum:
+ - POST
+ description: The HTTP method to be used for the request. Currently only `POST`
+ is supported.
+ url:
+ type: string
+ description: The OpenAI API relative URL to be used for the request. Currently
+ `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are
+ supported.
+ x-oaiMeta:
+ name: The request input object
+ example: >
+ {"custom_id": "request-1", "method": "POST", "url":
+ "/v1/chat/completions", "body": {"model": "gpt-4o-mini", "messages":
+ [{"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "What is 2+2?"}]}}
+ BatchRequestOutput:
+ type: object
+ description: The per-line object of the batch output and error files
+ properties:
+ id:
+ type: string
+ custom_id:
+ type: string
+ description: A developer-provided per-request id that will be used to match
+ outputs to inputs.
+ response:
+ type: object
+ nullable: true
+ properties:
+ status_code:
+ type: integer
+ description: The HTTP status code of the response
+ request_id:
+ type: string
+ description: A unique identifier for the OpenAI API request. Please include
+ this request ID when contacting support.
+ body: + type: object + x-oaiTypeLabel: map + description: The JSON body of the response + error: + type: object + nullable: true + description: For requests that failed with a non-HTTP error, this will contain + more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + x-oaiMeta: + name: The request output object + example: > + {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": + {"status_code": 200, "request_id": "req_c187b3", "body": {"id": + "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, + "model": "gpt-4o-mini", "choices": [{"index": 0, "message": {"role": + "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], + "usage": {"prompt_tokens": 24, "completion_tokens": 15, + "total_tokens": 39}, "system_fingerprint": null}}, "error": null} + CancelUploadRequest: + type: object + additionalProperties: false + ChatCompletionFunctionCallOption: + type: object + description: > + Specifying a particular function via `{"name": "my_function"}` forces + the model to call that function. + properties: + name: + type: string + description: The name of the function to call. + required: + - name + ChatCompletionFunctions: + type: object + deprecated: true + properties: + description: + type: string + description: A description of what the function does, used by the model to + choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or + contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + required: + - name + ChatCompletionMessageToolCall: + type: object + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + description: The function that the model called. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + required: + - name + - arguments + required: + - id + - type + - function + ChatCompletionMessageToolCallChunk: + type: object + properties: + index: + type: integer + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + required: + - index + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. 
+ items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + ChatCompletionModalities: + type: array + nullable: true + description: > + Output types that you would like the model to generate for this request. + + Most models are capable of generating text, which is the default: + + + `["text"]` + + + The `gpt-4o-audio-preview` model can also be used to [generate + audio](/docs/guides/audio). To + + request that this model generate both text and audio responses, you can + + use: + + + `["text", "audio"]` + items: + type: string + enum: + - text + - audio + ChatCompletionNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to + call a specific function. + properties: + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + - function + ChatCompletionRequestAssistantMessage: + type: object + title: Assistant message + properties: + content: + x-oaiExpandable: true + nullable: true + oneOf: + - type: string + description: The contents of the assistant message. + title: Text content + - type: array + description: An array of content parts with a defined type. Can be one or more + of type `text`, or exactly one of type `refusal`. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + minItems: 1 + description: > + The contents of the assistant message. Required unless `tool_calls` + or `function_call` is specified. + refusal: + nullable: true + type: string + description: The refusal message by the assistant. + role: + type: string + enum: + - assistant + description: The role of the messages author, in this case `assistant`. + name: + type: string + description: An optional name for the participant. Provides the model + information to differentiate between participants of the same role. + audio: + type: object + nullable: true + x-oaiExpandable: true + description: | + Data about a previous audio response from the model. + [Learn more](/docs/guides/audio). + required: + - id + properties: + id: + type: string + description: | + Unique identifier for a previous audio response from the model. + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + function_call: + type: object + deprecated: true + description: Deprecated and replaced by `tool_calls`. The name and arguments of + a function that should be called, as generated by the model. + nullable: true + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + name: + type: string + description: The name of the function to call. 
+ required: + - arguments + - name + required: + - role + ChatCompletionRequestAssistantMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + x-oaiExpandable: true + ChatCompletionRequestFunctionMessage: + type: object + title: Function message + deprecated: true + properties: + role: + type: string + enum: + - function + description: The role of the messages author, in this case `function`. + content: + nullable: true + type: string + description: The contents of the function message. + name: + type: string + description: The name of the function to call. + required: + - role + - content + - name + ChatCompletionRequestMessage: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage" + - $ref: "#/components/schemas/ChatCompletionRequestUserMessage" + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + - $ref: "#/components/schemas/ChatCompletionRequestToolMessage" + - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage" + x-oaiExpandable: true + ChatCompletionRequestMessageContentPartAudio: + type: object + title: Audio content part + description: | + Learn about [audio inputs](/docs/guides/audio). + properties: + type: + type: string + enum: + - input_audio + description: The type of the content part. Always `input_audio`. + input_audio: + type: object + properties: + data: + type: string + description: Base64 encoded audio data. + format: + type: string + enum: + - wav + - mp3 + description: > + The format of the encoded audio data. Currently supports "wav" + and "mp3". + required: + - data + - format + required: + - type + - input_audio + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + description: | + Learn about [image inputs](/docs/guides/vision). + properties: + type: + type: string + enum: + - image_url + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision + guide](/docs/guides/vision#low-or-high-fidelity-image-understanding). + enum: + - auto + - low + - high + default: auto + required: + - url + required: + - type + - image_url + ChatCompletionRequestMessageContentPartRefusal: + type: object + title: Refusal content part + properties: + type: + type: string + enum: + - refusal + description: The type of the content part. + refusal: + type: string + description: The refusal message generated by the model. + required: + - type + - refusal + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + description: | + Learn about [text inputs](/docs/guides/text-generation). + properties: + type: + type: string + enum: + - text + description: The type of the content part. + text: + type: string + description: The text content. + required: + - type + - text + ChatCompletionRequestSystemMessage: + type: object + title: System message + properties: + content: + description: The contents of the system message. + oneOf: + - type: string + description: The contents of the system message. + title: Text content + - type: array + description: An array of content parts with a defined type. For system messages, + only type `text` is supported. 
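+    # Example (illustrative only, not part of the schema): a user message whose content
+    # combines a text part and an image part, per the content-part schemas above. The
+    # URL is a placeholder.
+    #
+    #   {"role": "user", "content": [
+    #     {"type": "text", "text": "What is in this image?"},
+    #     {"type": "image_url", "image_url": {"url": "https://example.com/boardwalk.jpg", "detail": "auto"}}
+    #   ]}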
+ title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + minItems: 1 + role: + type: string + enum: + - system + description: The role of the messages author, in this case `system`. + name: + type: string + description: An optional name for the participant. Provides the model + information to differentiate between participants of the same role. + required: + - content + - role + ChatCompletionRequestSystemMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + ChatCompletionRequestToolMessage: + type: object + title: Tool message + properties: + role: + type: string + enum: + - tool + description: The role of the messages author, in this case `tool`. + content: + oneOf: + - type: string + description: The contents of the tool message. + title: Text content + - type: array + description: An array of content parts with a defined type. For tool messages, + only type `text` is supported. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestToolMessageContentPart" + minItems: 1 + description: The contents of the tool message. + tool_call_id: + type: string + description: Tool call that this message is responding to. + required: + - role + - content + - tool_call_id + ChatCompletionRequestToolMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + x-oaiExpandable: true + ChatCompletionRequestUserMessage: + type: object + title: User message + properties: + content: + description: | + The contents of the user message. + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type. Supported options + differ based on the [model](/docs/models) being used to generate + the response. Can contain text, image, or audio inputs. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestUserMessageContentPart" + minItems: 1 + x-oaiExpandable: true + role: + type: string + enum: + - user + description: The role of the messages author, in this case `user`. + name: + type: string + description: An optional name for the participant. Provides the model + information to differentiate between participants of the same role. + required: + - content + - role + ChatCompletionRequestUserMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartAudio" + x-oaiExpandable: true + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: + type: string + description: The contents of the message. + nullable: true + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: + type: string + enum: + - assistant + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: Deprecated and replaced by `tool_calls`. The name and arguments of + a function that should be called, as generated by the model. 
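+    # Example (illustrative only, not part of the schema): a user message that supplies
+    # audio input via the input_audio content part defined above. The base64 payload is
+    # elided.
+    #
+    #   {"role": "user", "content": [
+    #     {"type": "input_audio", "input_audio": {"data": "<base64-encoded bytes>", "format": "wav"}}
+    #   ]}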
+ properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments + audio: + type: object + nullable: true + description: > + If the audio output modality is requested, this object contains data + + about the audio response from the model. [Learn + more](/docs/guides/audio). + x-oaiExpandable: true + required: + - id + - expires_at + - data + - transcript + properties: + id: + type: string + description: Unique identifier for this audio response. + expires_at: + type: integer + description: > + The Unix timestamp (in seconds) for when this audio response + will + + no longer be accessible on the server for use in multi-turn + + conversations. + data: + type: string + description: | + Base64 encoded audio bytes generated by the model, in the format + specified in the request. + transcript: + type: string + description: Transcript of the audio generated by the model. + required: + - role + - content + - refusal + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + ChatCompletionStreamOptions: + description: > + Options for streaming response. Only set this when you set `stream: + true`. + type: object + nullable: true + default: null + properties: + include_usage: + type: boolean + description: > + If set, an additional chunk will be streamed before the `data: + [DONE]` message. The `usage` field on this chunk shows the token + usage statistics for the entire request, and the `choices` field + will always be an empty array. All other chunks will also include a + `usage` field, but with a null value. + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: Deprecated and replaced by `tool_calls`. The name and arguments of + a function that should be called, as generated by the model. + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model + in JSON format. Note that the model does not always generate + valid JSON, and may hallucinate parameters not defined by your + function schema. Validate the arguments in your code before + calling your function. + name: + type: string + description: The name of the function to call. + tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: + type: string + enum: + - system + - user + - assistant + - tool + description: The role of the author of this message. + refusal: + type: string + description: The refusal message generated by the model. + nullable: true + ChatCompletionTokenLogprob: + type: object + properties: + token: &a1 + description: The token. + type: string + logprob: &a2 + description: The log probability of this token, if it is within the top 20 most + likely tokens. Otherwise, the value `-9999.0` is used to signify + that the token is very unlikely. 
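+    # Example (illustrative only, not part of the schema): request fields that enable
+    # streaming with a final usage chunk, per ChatCompletionStreamOptions above. The
+    # model ID is a placeholder.
+    #
+    #   {
+    #     "model": "gpt-4o-mini",
+    #     "messages": [{"role": "user", "content": "Hello"}],
+    #     "stream": true,
+    #     "stream_options": {"include_usage": true}
+    #   }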
+ type: number + bytes: &a3 + description: A list of integers representing the UTF-8 bytes representation of + the token. Useful in instances where characters are represented by + multiple tokens and their byte representations must be combined to + generate the correct text representation. Can be `null` if there is + no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this + token position. In rare cases, there may be fewer than the number of + requested `top_logprobs` returned. + type: array + items: type: object properties: - type: - enum: - - session - - api_key - type: string - description: The type of actor. Is either `session` or `api_key`. - session: - $ref: '#/components/schemas/AuditLogActorSession' - api_key: - $ref: '#/components/schemas/AuditLogActorApiKey' - description: The actor who performed the audit logged action. - AuditLogEventType: - enum: - - api_key.created - - api_key.updated - - api_key.deleted - - invite.sent - - invite.accepted - - invite.deleted - - login.succeeded - - login.failed - - logout.succeeded - - logout.failed - - organization.updated - - project.created - - project.updated - - project.archived - - service_account.created - - service_account.updated - - service_account.deleted - - user.added - - user.updated - - user.deleted - type: string - description: The event type. - x-oaiExpandable: true - AuditLog: + token: *a1 + logprob: *a2 + bytes: *a3 required: - - id - - type - - effective_at - - actor - type: object - properties: - id: - type: string - description: The ID of this log. - type: - $ref: '#/components/schemas/AuditLogEventType' - effective_at: - type: integer - description: The Unix timestamp (in seconds) of the event. - project: - type: object - properties: - id: - type: string - description: The project ID. - name: - type: string - description: The project title. - description: The project that the action was scoped to. Absent for actions not scoped to projects. - actor: - $ref: '#/components/schemas/AuditLogActor' - api_key.created: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - data: - type: object - properties: - scopes: - type: array - items: - type: string - description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' - description: The payload used to create the API key. - description: The details for events with this `type`. - api_key.updated: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - changes_requested: - type: object - properties: - scopes: - type: array - items: - type: string - description: 'A list of scopes allowed for the API key, e.g. `["api.model.request"]`' - description: The payload used to update the API key. - description: The details for events with this `type`. - api_key.deleted: - type: object - properties: - id: - type: string - description: The tracking ID of the API key. - description: The details for events with this `type`. - invite.sent: - type: object - properties: - id: - type: string - description: The ID of the invite. - data: - type: object - properties: - email: - type: string - description: The email invited to the organization. - role: - type: string - description: The role the email was invited to be. Is either `owner` or `member`. - description: The payload used to create the invite. - description: The details for events with this `type`. 
- invite.accepted: - type: object - properties: - id: - type: string - description: The ID of the invite. - description: The details for events with this `type`. - invite.deleted: - type: object - properties: - id: - type: string - description: The ID of the invite. - description: The details for events with this `type`. - login.failed: - type: object - properties: - error_code: - type: string - description: The error code of the failure. - error_message: - type: string - description: The error message of the failure. - description: The details for events with this `type`. - logout.failed: - type: object - properties: - error_code: - type: string - description: The error code of the failure. - error_message: - type: string - description: The error message of the failure. - description: The details for events with this `type`. - organization.updated: - type: object - properties: - id: - type: string - description: The organization ID. - changes_requested: - type: object - properties: - title: - type: string - description: The organization title. - description: - type: string - description: The organization description. - name: - type: string - description: The organization name. - settings: - type: object - properties: - threads_ui_visibility: - type: string - description: 'Visibility of the threads page which shows messages created with the Assistants API and Playground. One of `ANY_ROLE`, `OWNERS`, or `NONE`.' - usage_dashboard_visibility: - type: string - description: Visibility of the usage dashboard which shows activity and costs for your organization. One of `ANY_ROLE` or `OWNERS`. - description: The payload used to update the organization settings. - description: The details for events with this `type`. - project.created: - type: object - properties: - id: - type: string - description: The project ID. - data: - type: object - properties: - name: - type: string - description: The project name. - title: - type: string - description: The title of the project as seen on the dashboard. - description: The payload used to create the project. - description: The details for events with this `type`. - project.updated: - type: object - properties: - id: - type: string - description: The project ID. - changes_requested: - type: object - properties: - title: - type: string - description: The title of the project as seen on the dashboard. - description: The payload used to update the project. - description: The details for events with this `type`. - project.archived: - type: object - properties: - id: - type: string - description: The project ID. - description: The details for events with this `type`. - service_account.created: - type: object - properties: - id: - type: string - description: The service account ID. - data: - type: object - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - description: The payload used to create the service account. - description: The details for events with this `type`. - service_account.updated: - type: object - properties: - id: - type: string - description: The service account ID. - changes_requested: - type: object - properties: - role: - type: string - description: The role of the service account. Is either `owner` or `member`. - description: The payload used to updated the service account. - description: The details for events with this `type`. - service_account.deleted: - type: object - properties: - id: - type: string - description: The service account ID. 
- description: The details for events with this `type`. - user.added: + - token + - logprob + - bytes + required: + - token + - logprob + - bytes + - top_logprobs + ChatCompletionTool: + type: object + properties: + type: + type: string + enum: + - function + description: The type of the tool. Currently, only `function` is supported. + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + ChatCompletionToolChoiceOption: + description: > + Controls which (if any) tool is called by the model. + + `none` means the model will not call any tool and instead generates a + message. + + `auto` means the model can pick between generating a message or calling + one or more tools. + + `required` means the model must call one or more tools. + + Specifying a particular tool via `{"type": "function", "function": + {"name": "my_function"}}` forces the model to call that tool. + + + `none` is the default when no tools are present. `auto` is the default + if tools are present. + oneOf: + - type: string + description: > + `none` means the model will not call any tool and instead generates + a message. `auto` means the model can pick between generating a + message or calling one or more tools. `required` means the model + must call one or more tools. + enum: + - none + - auto + - required + - $ref: "#/components/schemas/ChatCompletionNamedToolChoice" + x-oaiExpandable: true + ChunkingStrategyRequestParam: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will + use the `auto` strategy. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + CompleteUploadRequest: + type: object + additionalProperties: false + properties: + part_ids: + type: array + description: | + The ordered list of Part IDs. + items: + type: string + md5: + description: > + The optional md5 checksum for the file contents to verify if the + bytes uploaded matches what you expect. + type: string + required: + - part_ids + CompletionUsage: + type: object + description: Usage statistics for the completion request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + completion_tokens_details: + type: object + description: Breakdown of tokens used in a completion. + properties: + accepted_prediction_tokens: + type: integer + description: | + When using Predicted Outputs, the number of tokens in the + prediction that appeared in the completion. + audio_tokens: + type: integer + description: Audio input tokens generated by the model. + reasoning_tokens: + type: integer + description: Tokens generated by the model for reasoning. + rejected_prediction_tokens: + type: integer + description: > + When using Predicted Outputs, the number of tokens in the + + prediction that did not appear in the completion. However, like + + reasoning tokens, these tokens are still counted in the total + + completion tokens for purposes of billing, output, and context + window + + limits. + prompt_tokens_details: + type: object + description: Breakdown of tokens used in the prompt. + properties: + audio_tokens: + type: integer + description: Audio input tokens present in the prompt. 
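+    # Example (illustrative only, not part of the schema): the two accepted forms of
+    # `tool_choice` described by ChatCompletionToolChoiceOption above, either a string
+    # mode or a named function. The function name is hypothetical.
+    #
+    #   "tool_choice": "auto"
+    #   "tool_choice": {"type": "function", "function": {"name": "get_current_weather"}}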
+            cached_tokens:
+              type: integer
+              description: Cached tokens present in the prompt.
+      required:
+        - prompt_tokens
+        - completion_tokens
+        - total_tokens
+    CostsResult:
+      type: object
+      description: The aggregated costs details of the specific time bucket.
+      properties:
+        object:
+          type: string
+          enum:
+            - organization.costs.result
+        amount:
+          type: object
+          description: The monetary value in its associated currency.
+          properties:
+            value:
+              type: number
+              description: The numeric value of the cost.
+            currency:
+              type: string
+              description: Lowercase ISO-4217 currency e.g. "usd"
+        line_item:
+          type: string
+          description: When `group_by=line_item`, this field provides the line item of the
+            grouped costs result.
+        project_id:
+          type: string
+          description: When `group_by=project_id`, this field provides the project ID of
+            the grouped costs result.
+      required:
+        - object
+        - sessions
+      x-oaiMeta:
+        name: Costs object
+        example: |
+          {
+            "object": "organization.costs.result",
+            "amount": {
+              "value": 0.06,
+              "currency": "usd"
+            },
+            "line_item": "Image models",
+            "project_id": "proj_abc"
+          }
+    CreateAssistantRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        model:
+          description: >
+            ID of the model to use. You can use the [List
+            models](/docs/api-reference/models/list) API to see all of your
+            available models, or see our [Model overview](/docs/models) for
+            descriptions of them.
+          example: gpt-4o
+          anyOf:
+            - type: string
+            - type: string
+              enum:
+                - gpt-4o
+                - gpt-4o-2024-11-20
+                - gpt-4o-2024-08-06
+                - gpt-4o-2024-05-13
+                - gpt-4o-mini
+                - gpt-4o-mini-2024-07-18
+                - gpt-4-turbo
+                - gpt-4-turbo-2024-04-09
+                - gpt-4-0125-preview
+                - gpt-4-turbo-preview
+                - gpt-4-1106-preview
+                - gpt-4-vision-preview
+                - gpt-4
+                - gpt-4-0314
+                - gpt-4-0613
+                - gpt-4-32k
+                - gpt-4-32k-0314
+                - gpt-4-32k-0613
+                - gpt-3.5-turbo
+                - gpt-3.5-turbo-16k
+                - gpt-3.5-turbo-0613
+                - gpt-3.5-turbo-1106
+                - gpt-3.5-turbo-0125
+                - gpt-3.5-turbo-16k-0613
+          x-oaiTypeLabel: string
+        name:
+          description: |
+            The name of the assistant. The maximum length is 256 characters.
+          type: string
+          nullable: true
+          maxLength: 256
+        description:
+          description: >
+            The description of the assistant. The maximum length is 512
+            characters.
+          type: string
+          nullable: true
+          maxLength: 512
+        instructions:
+          description: >
+            The system instructions that the assistant uses. The maximum length
+            is 256,000 characters.
+          type: string
+          nullable: true
+          maxLength: 256000
+        tools:
+          description: >
+            A list of tools enabled on the assistant. There can be a maximum of
+            128 tools per assistant. Tools can be of types `code_interpreter`,
+            `file_search`, or `function`.
+          default: []
+          type: array
+          maxItems: 128
+          items:
+            oneOf:
+              - $ref: "#/components/schemas/AssistantToolsCode"
+              - $ref: "#/components/schemas/AssistantToolsFileSearch"
+              - $ref: "#/components/schemas/AssistantToolsFunction"
+            x-oaiExpandable: true
+        tool_resources:
+          type: object
+          description: >
+            A set of resources that are used by the assistant's tools. The
+            resources are specific to the type of tool. For example, the
+            `code_interpreter` tool requires a list of file IDs, while the
+            `file_search` tool requires a list of vector store IDs.
+          properties:
+            code_interpreter:
+              type: object
+              properties:
+                file_ids:
+                  type: array
+                  description: >
+                    A list of [file](/docs/api-reference/files) IDs made
+                    available to the `code_interpreter` tool. There can be a
+                    maximum of 20 files associated with the tool.
+ default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this assistant. There can be a maximum of 1 + vector store attached to the assistant. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: > + A helper to create a [vector + store](/docs/api-reference/vector-stores/object) with + file_ids and attach it to this assistant. There can be a + maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: type: object properties: - id: - type: string - description: The user ID. - data: - type: object + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs to add + to the vector store. There can be a maximum of 10000 + files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will + use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a + `max_chunk_size_tokens` of `800` and + `chunk_overlap_tokens` of `400`. + additionalProperties: false properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - description: The payload used to add the user to the project. - description: The details for events with this `type`. - user.updated: - type: object - properties: - id: - type: string - description: The project ID. - changes_requested: - type: object + type: + type: string + description: Always `auto`. + enum: + - auto + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false properties: - role: - type: string - description: The role of the user. Is either `owner` or `member`. - description: The payload used to update the user. - description: The details for events with this `type`. - user.deleted: - type: object - properties: - id: - type: string - description: The user ID. - description: The details for events with this `type`. - description: A log of a user action or configuration change within this organization. - x-oaiMeta: - name: The audit log object - example: "{\n \"id\": \"req_xxx_20240101\",\n \"type\": \"api_key.created\",\n \"effective_at\": 1720804090,\n \"actor\": {\n \"type\": \"session\",\n \"session\": {\n \"user\": {\n \"id\": \"user-xxx\",\n \"email\": \"user@example.com\"\n },\n \"ip_address\": \"127.0.0.1\",\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n },\n \"api_key.created\": {\n \"id\": \"key_xxxx\",\n \"data\": {\n \"scopes\": [\"resource.operation\"]\n }\n }\n}\n" - ListAuditLogsResponse: - required: - - object - - data - - first_id - - last_id - - has_more + type: + type: string + description: Always `static`. + enum: + - static + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is + `800`. The minimum value is `100` and the + maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: > + The number of tokens that overlap between + chunks. The default value is `400`. + + + Note that the overlap must not exceed half + of `max_chunk_size_tokens`. 
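+    # Example (illustrative only, not part of the schema): a `chunking_strategy` value
+    # for the vector store helper above. The overlap of 400 respects the documented
+    # constraint of at most half of `max_chunk_size_tokens` (800 here).
+    #
+    #   {"type": "static", "static": {"max_chunk_size_tokens": 800, "chunk_overlap_tokens": 400}}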
+ required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: > + Set of 16 key-value pairs that can be attached to a + vector store. This can be useful for storing + additional information about the vector store in a + structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 + characters long. + x-oaiTypeLabel: map + oneOf: + - required: + - vector_store_ids + - required: + - vector_stores + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - model + CreateChatCompletionFunctionResponse: + type: object + description: Represents a chat completion response returned by model, based on + the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is + greater than 1. + items: type: object - properties: - object: - enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/AuditLog' - first_id: - type: string - example: audit_log-defb456h8dks - last_id: - type: string - example: audit_log-hnbkd8s93s - has_more: - type: boolean - Invite: required: - - object - - id - - email - - role - - status - - invited_at - - expires_at - type: object + - finish_reason + - index + - message + - logprobs properties: - object: - enum: - - organization.invite - type: string - description: 'The object type, which is always `organization.invite`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - email: - type: string - description: The email address of the individual to whom the invite was sent - role: - enum: - - owner - - reader - type: string - description: '`owner` or `reader`' - status: - enum: - - accepted - - expired - - pending - type: string - description: '`accepted`,`expired`, or `pending`' - invited_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was sent. - expires_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite expires. - accepted_at: - type: integer - description: The Unix timestamp (in seconds) of when the invite was accepted. - description: Represents an individual `invite` to the organization. 
- x-oaiMeta: - name: The invite object - example: "{\n \"object\": \"organization.invite\",\n \"id\": \"invite-abc\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"status\": \"accepted\",\n \"invited_at\": 1711471533,\n \"expires_at\": 1711471533,\n \"accepted_at\": 1711471533\n}\n" - InviteListResponse: - required: - - object - - data + finish_reason: + type: string + description: > + The reason the model stopped generating tokens. This will be + `stop` if the model hit a natural stop point or a provided + stop sequence, `length` if the maximum number of tokens + specified in the request was reached, `content_filter` if + content was omitted due to a flag from our content filters, or + `function_call` if the model called a function. + enum: + - stop + - length + - function_call + - content_filter + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was + created. + model: + type: string + description: The model used for the chat completion. + system_fingerprint: + type: string + description: > + This fingerprint represents the backend configuration that the model + runs with. + + + Can be used in conjunction with the `seed` request parameter to + understand when backend changes have been made that might impact + determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: + - chat.completion + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: | + { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1699896916, + "model": "gpt-4o-mini", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\n\"location\": \"Boston, MA\"\n}" + } + } + ] + }, + "logprobs": null, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 82, + "completion_tokens": 17, + "total_tokens": 99, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + CreateChatCompletionImageResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned + by model, based on the provided input. + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: > + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nThis image shows a wooden boardwalk extending through a lush green marshland.", + }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21, + "completion_tokens_details": { + "reasoning_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + } + } + CreateChatCompletionRequest: + type: object + properties: + messages: + description: > + A list of messages comprising the conversation so far. 
Depending on + the + + [model](/docs/models) you use, different message types (modalities) + are + + supported, like [text](/docs/guides/text-generation), + + [images](/docs/guides/vision), and [audio](/docs/guides/audio). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint + compatibility](/docs/models#model-endpoint-compatibility) table for + details on which models work with the Chat API. + example: gpt-4o + anyOf: + - type: string + - type: string + enum: + - o1-preview + - o1-preview-2024-09-12 + - o1-mini + - o1-mini-2024-09-12 + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-realtime-preview + - gpt-4o-realtime-preview-2024-10-01 + - gpt-4o-audio-preview + - gpt-4o-audio-preview-2024-10-01 + - chatgpt-4o-latest + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0301 + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 + x-oaiTypeLabel: string + store: + type: boolean + default: false + nullable: true + description: > + Whether or not to store the output of this chat completion request + + for use in our [model distillation](/docs/guides/distillation) or + [evals](/docs/guides/evals) products. + metadata: + type: object + nullable: true + description: | + Developer-defined tags and values used for filtering completions + in the [dashboard](https://platform.openai.com/chat-completions). + additionalProperties: + type: string + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: > + Number between -2.0 and 2.0. Positive values penalize new tokens + based on their existing frequency in the text so far, decreasing the + model's likelihood to repeat the same line verbatim. + + + [See more information about frequency and presence + penalties.](/docs/guides/text-generation) + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: > + Modify the likelihood of specified tokens appearing in the + completion. + + + Accepts a JSON object that maps tokens (specified by their token ID + in the tokenizer) to an associated bias value from -100 to 100. + Mathematically, the bias is added to the logits generated by the + model prior to sampling. The exact effect will vary per model, but + values between -1 and 1 should decrease or increase likelihood of + selection; values like -100 or 100 should result in a ban or + exclusive selection of the relevant token. + logprobs: + description: Whether to return log probabilities of the output tokens or not. If + true, returns the log probabilities of each output token returned in + the `content` of `message`. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 20 specifying the number of most likely + tokens to return at each token position, each with an associated log + probability. `logprobs` must be set to `true` if this parameter is + used. 
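+    # Example (illustrative only, not part of the schema): request fields combining
+    # `logit_bias`, `logprobs`, and `top_logprobs` as described above. The token ID is
+    # illustrative; bias values range from -100 to 100.
+    #
+    #   {
+    #     "logit_bias": {"50256": -100},
+    #     "logprobs": true,
+    #     "top_logprobs": 2
+    #   }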
+ type: integer + minimum: 0 + maximum: 20 + nullable: true + max_tokens: + description: > + The maximum number of [tokens](/tokenizer) that can be generated in + the chat completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + + This value is now deprecated in favor of `max_completion_tokens`, + and is not compatible with [o1 series + models](/docs/guides/reasoning). + type: integer + nullable: true + deprecated: true + max_completion_tokens: + description: > + An upper bound for the number of tokens that can be generated for a + completion, including visible output tokens and [reasoning + tokens](/docs/guides/reasoning). + type: integer + nullable: true + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input + message. Note that you will be charged based on the number of + generated tokens across all of the choices. Keep `n` as `1` to + minimize costs. + modalities: + $ref: "#/components/schemas/ChatCompletionModalities" + prediction: + nullable: true + x-oaiExpandable: true + description: > + Configuration for a [Predicted + Output](/docs/guides/predicted-outputs), + + which can greatly improve response times when large parts of the + model + + response are known ahead of time. This is most common when you are + + regenerating a file with only minor changes to most of the content. + oneOf: + - $ref: "#/components/schemas/PredictionContent" + audio: + type: object + nullable: true + description: > + Parameters for audio output. Required when audio output is requested + with + + `modalities: ["audio"]`. [Learn more](/docs/guides/audio). + required: + - voice + - format + x-oaiExpandable: true + properties: + voice: + type: string + enum: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + description: > + The voice the model uses to respond. Supported voices are `ash`, + `ballad`, `coral`, `sage`, and `verse` (also supported but not + recommended are `alloy`, `echo`, and `shimmer`; these voices are + less expressive). + format: + type: string + enum: + - wav + - mp3 + - flac + - opus + - pcm16 + description: > + Specifies the output audio format. Must be one of `wav`, `mp3`, + `flac`, + + `opus`, or `pcm16`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: > + Number between -2.0 and 2.0. Positive values penalize new tokens + based on whether they appear in the text so far, increasing the + model's likelihood to talk about new topics. + + + [See more information about frequency and presence + penalties.](/docs/guides/text-generation) + response_format: + description: > + An object specifying the format that the model must output. + Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4o + mini](/docs/models#gpt-4o-mini), [GPT-4 + Turbo](/docs/models#gpt-4-turbo-and-gpt-4) and all GPT-3.5 Turbo + models newer than `gpt-3.5-turbo-1106`. + + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables + Structured Outputs which ensures the model will match your supplied + JSON schema. Learn more in the [Structured Outputs + guide](/docs/guides/structured-outputs). + + + Setting to `{ "type": "json_object" }` enables JSON mode, which + ensures the message the model generates is valid JSON. + + + **Important:** when using JSON mode, you **must** also instruct the + model to produce JSON yourself via a system or user message. 
Without
+            this, the model may generate an unending stream of whitespace until
+            the generation reaches the token limit, resulting in a long-running
+            and seemingly "stuck" request. Also note that the message content
+            may be partially cut off if `finish_reason="length"`, which
+            indicates the generation exceeded `max_tokens` or the conversation
+            exceeded the max context length.
+          oneOf:
+            - $ref: "#/components/schemas/ResponseFormatText"
+            - $ref: "#/components/schemas/ResponseFormatJsonObject"
+            - $ref: "#/components/schemas/ResponseFormatJsonSchema"
+          x-oaiExpandable: true
+        seed:
+          type: integer
+          minimum: -9223372036854776000
+          maximum: 9223372036854776000
+          nullable: true
+          description: >
+            This feature is in Beta.
+
+            If specified, our system will make a best effort to sample
+            deterministically, such that repeated requests with the same `seed`
+            and parameters should return the same result.
+
+            Determinism is not guaranteed, and you should refer to the
+            `system_fingerprint` response parameter to monitor changes in the
+            backend.
+          x-oaiMeta:
+            beta: true
+        service_tier:
+          description: >
+            Specifies the latency tier to use for processing the request. This
+            parameter is relevant for customers subscribed to the scale tier
+            service:
+              - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+              - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
+              - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
+              - When not set, the default behavior is 'auto'.
+
+              When this parameter is set, the response body will include the `service_tier` utilized.
+          type: string
+          enum:
+            - auto
+            - default
+          nullable: true
+          default: auto
+        stop:
+          description: |
+            Up to 4 sequences where the API will stop generating further tokens.
+          default: null
+          oneOf:
+            - type: string
+              nullable: true
+            - type: array
+              minItems: 1
+              maxItems: 4
+              items:
+                type: string
+        stream:
+          description: >
+            If set, partial message deltas will be sent, like in ChatGPT. Tokens
+            will be sent as data-only [server-sent
+            events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
+            as they become available, with the stream terminated by a `data:
+            [DONE]` message. [Example Python
+            code](https://cookbook.openai.com/examples/how_to_stream_completions).
+          type: boolean
+          nullable: true
+          default: false
+        stream_options:
+          $ref: "#/components/schemas/ChatCompletionStreamOptions"
+        temperature:
+          type: number
+          minimum: 0
+          maximum: 2
+          default: 1
+          example: 1
+          nullable: true
+          description: >
+            What sampling temperature to use, between 0 and 2. Higher values
+            like 0.8 will make the output more random, while lower values like
+            0.2 will make it more focused and deterministic.
+
+            We generally recommend altering this or `top_p` but not both.
+        top_p:
+          type: number
+          minimum: 0
+          maximum: 1
+          default: 1
+          example: 1
+          nullable: true
+          description: >
+            An alternative to sampling with temperature, called nucleus
+            sampling, where the model considers the results of the tokens with
+            top_p probability mass. So 0.1 means only the tokens comprising the
+            top 10% probability mass are considered.
+
+            We generally recommend altering this or `temperature` but not both.
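+    # Example (illustrative only, not part of the schema): `response_format` values for
+    # JSON mode and Structured Outputs as described above. The inner fields follow the
+    # ResponseFormatJsonSchema schema referenced above; the schema shown is hypothetical.
+    #
+    #   "response_format": {"type": "json_object"}
+    #
+    #   "response_format": {
+    #     "type": "json_schema",
+    #     "json_schema": {
+    #       "name": "math_answer",
+    #       "schema": {
+    #         "type": "object",
+    #         "properties": {"answer": {"type": "string"}},
+    #         "required": ["answer"],
+    #         "additionalProperties": false
+    #       },
+    #       "strict": true
+    #     }
+    #   }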
+ tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are + supported as a tool. Use this to provide a list of functions the + model may generate JSON inputs for. A max of 128 functions are + supported. + items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + function_call: + deprecated: true + description: > + Deprecated in favor of `tool_choice`. + + + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead + generates a message. + + `auto` means the model can pick between generating a message or + calling a function. + + Specifying a particular function via `{"name": "my_function"}` + forces the model to call that function. + + + `none` is the default when no functions are present. `auto` is the + default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead + generates a message. `auto` means the model can pick between + generating a message or calling a function. + enum: + - none + - auto + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true + description: | + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + required: + - model + - messages + CreateChatCompletionResponse: + type: object + description: Represents a chat completion response returned by model, based on + the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is + greater than 1. + items: type: object + required: + - finish_reason + - index + - message + - logprobs properties: - object: - enum: - - list - type: string - description: 'The object type, which is always `list`' - data: + finish_reason: + type: string + description: > + The reason the model stopped generating tokens. This will be + `stop` if the model hit a natural stop point or a provided + stop sequence, + + `length` if the maximum number of tokens specified in the + request was reached, + + `content_filter` if content was omitted due to a flag from our + content filters, + + `tool_calls` if the model called a tool, or `function_call` + (deprecated) if the model called a function. + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. 
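+    # Example (illustrative only, not part of the schema): a `tools` entry declaring one
+    # function the model may call, as described above. The `function` fields follow the
+    # FunctionObject schema referenced above; the function itself is hypothetical.
+    #
+    #   "tools": [{
+    #     "type": "function",
+    #     "function": {
+    #       "name": "get_current_weather",
+    #       "description": "Get the current weather for a location.",
+    #       "parameters": {
+    #         "type": "object",
+    #         "properties": {"location": {"type": "string"}},
+    #         "required": ["location"]
+    #       }
+    #     }
+    #   }]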
type: array items: - $ref: '#/components/schemas/Invite' - first_id: - type: string - description: The first `invite_id` in the retrieved `list` - last_id: - type: string - description: The last `invite_id` in the retrieved `list` - has_more: - type: boolean - description: The `has_more` property is used for pagination to indicate there are additional results. - InviteRequest: - required: - - email - - role - type: object - properties: - email: - type: string - description: Send an email to this address - role: - enum: - - reader - - owner - type: string - description: '`owner` or `reader`' - InviteDeleteResponse: - required: - - object - - id - - deleted - type: object - properties: - object: - enum: - - organization.invite.deleted - type: string - description: 'The object type, which is always `organization.invite.deleted`' - id: - type: string - deleted: - type: boolean - User: - required: - - object - - id - - name - - email - - role - - added_at - type: object - properties: - object: - enum: - - organization.user - type: string - description: 'The object type, which is always `organization.user`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - name: - type: string - description: The name of the user - email: - type: string - description: The email address of the user - role: - enum: - - owner - - reader - type: string - description: '`owner` or `reader`' - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the user was added. - description: Represents an individual `user` within an organization. - x-oaiMeta: - name: The user object - example: "{\n \"object\": \"organization.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - UserListResponse: - required: - - object - - data - - first_id - - last_id - - has_more - type: object - properties: - object: - enum: - - list - type: string - data: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + refusal: + description: A list of message refusal tokens with log probability information. type: array items: - $ref: '#/components/schemas/User' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - UserRoleUpdateRequest: - required: - - role - type: object - properties: - role: - enum: - - owner - - reader - type: string - description: '`owner` or `reader`' - UserDeleteResponse: - required: - - object - - id - - deleted + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + - refusal + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was + created. + model: + type: string + description: The model used for the chat completion. + service_tier: + description: The service tier used for processing the request. This field is + only included if the `service_tier` parameter is specified in the + request. + type: string + enum: + - scale + - default + example: scale + nullable: true + system_fingerprint: + type: string + description: > + This fingerprint represents the backend configuration that the model + runs with. + + + Can be used in conjunction with the `seed` request parameter to + understand when backend changes have been made that might impact + determinism. + object: + type: string + description: The object type, which is always `chat.completion`. 
+          enum:
+            - chat.completion
+        usage:
+          $ref: "#/components/schemas/CompletionUsage"
+      required:
+        - choices
+        - created
+        - id
+        - model
+        - object
+      x-oaiMeta:
+        name: The chat completion object
+        group: chat
+        example: |
+          {
+            "id": "chatcmpl-123456",
+            "object": "chat.completion",
+            "created": 1728933352,
+            "model": "gpt-4o-2024-08-06",
+            "choices": [
+              {
+                "index": 0,
+                "message": {
+                  "role": "assistant",
+                  "content": "Hi there! How can I assist you today?",
+                  "refusal": null
+                },
+                "logprobs": null,
+                "finish_reason": "stop"
+              }
+            ],
+            "usage": {
+              "prompt_tokens": 19,
+              "completion_tokens": 10,
+              "total_tokens": 29,
+              "prompt_tokens_details": {
+                "cached_tokens": 0
+              },
+              "completion_tokens_details": {
+                "reasoning_tokens": 0,
+                "accepted_prediction_tokens": 0,
+                "rejected_prediction_tokens": 0
+              }
+            },
+            "system_fingerprint": "fp_6b68a8204b"
+          }
+    CreateChatCompletionStreamResponse:
+      type: object
+      description: Represents a streamed chunk of a chat completion response returned
+        by model, based on the provided input.
+      properties:
+        id:
+          type: string
+          description: A unique identifier for the chat completion. Each chunk has the
+            same ID.
+        choices:
+          type: array
+          description: >
+            A list of chat completion choices. Can contain more than one
+            element if `n` is greater than 1. Can also be empty for the
+
+            last chunk if you set `stream_options: {"include_usage": true}`.
+          items:
+            type: object
+            required:
+              - delta
+              - finish_reason
+              - index
+            properties:
+              delta:
+                $ref: "#/components/schemas/ChatCompletionStreamResponseDelta"
+              logprobs:
+                description: Log probability information for the choice.
+                type: object
+                nullable: true
+                properties:
+                  content:
+                    description: A list of message content tokens with log probability information.
+                    type: array
+                    items:
+                      $ref: "#/components/schemas/ChatCompletionTokenLogprob"
+                    nullable: true
+                  refusal:
+                    description: A list of message refusal tokens with log probability information.
type: array items: - $ref: '#/components/schemas/Project' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectCreateRequest: - required: - - name - type: object - properties: - name: - type: string - description: 'The friendly name of the project, this name appears in reports.' - ProjectUpdateRequest: - required: - - name - type: object - properties: - name: - type: string - description: 'The updated name of the project, this name appears in reports.' - DefaultProjectErrorResponse: - required: - - code - - message - type: object - properties: - code: - type: integer - message: - type: string - ProjectUser: - required: - - object - - id - - name - - email - - role - - added_at + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + - refusal + finish_reason: + type: string + description: > + The reason the model stopped generating tokens. This will be + `stop` if the model hit a natural stop point or a provided + stop sequence, + + `length` if the maximum number of tokens specified in the + request was reached, + + `content_filter` if content was omitted due to a flag from our + content filters, + + `tool_calls` if the model called a tool, or `function_call` + (deprecated) if the model called a function. + enum: + - stop + - length + - tool_calls + - content_filter + - function_call + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was + created. Each chunk has the same timestamp. + model: + type: string + description: The model to generate the completion. + service_tier: + description: The service tier used for processing the request. This field is + only included if the `service_tier` parameter is specified in the + request. + type: string + enum: + - scale + - default + example: scale + nullable: true + system_fingerprint: + type: string + description: > + This fingerprint represents the backend configuration that the model + runs with. + + Can be used in conjunction with the `seed` request parameter to + understand when backend changes have been made that might impact + determinism. + object: + type: string + description: The object type, which is always `chat.completion.chunk`. + enum: + - chat.completion.chunk + usage: + type: object + nullable: true + description: > + An optional field that will only be present when you set + `stream_options: {"include_usage": true}` in your request. + + When present, it contains a null value except for the last chunk + which contains the token usage statistics for the entire request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). 
+ required: + - prompt_tokens + - completion_tokens + - total_tokens + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: > + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + + + .... + + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4o-mini", + "system_fingerprint": "fp_44709d6fcb", + "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + CreateCompletionRequest: + type: object + properties: + model: + description: > + ID of the model to use. You can use the [List + models](/docs/api-reference/models/list) API to see all of your + available models, or see our [Model overview](/docs/models) for + descriptions of them. + anyOf: + - type: string + - type: string + enum: + - gpt-3.5-turbo-instruct + - davinci-002 + - babbage-002 + x-oaiTypeLabel: string + prompt: + description: > + The prompt(s) to generate completions for, encoded as a string, + array of strings, array of tokens, or array of token arrays. + + + Note that <|endoftext|> is the document separator that the model + sees during training, so if a prompt is not specified the model will + generate as if from the beginning of a new document. + default: <|endoftext|> + nullable: true + oneOf: + - type: string + default: "" + example: This is a test. + - type: array + items: + type: string + default: "" + example: This is a test. + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: > + Generates `best_of` completions server-side and returns the "best" + (the one with the highest log probability per token). Results cannot + be streamed. + + + When used with `n`, `best_of` controls the number of candidate + completions and `n` specifies how many to return – `best_of` must be + greater than `n`. + + + **Note:** Because this parameter generates many completions, it can + quickly consume your token quota. Use carefully and ensure that you + have reasonable settings for `max_tokens` and `stop`. + echo: + type: boolean + default: false + nullable: true + description: | + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: > + Number between -2.0 and 2.0. Positive values penalize new tokens + based on their existing frequency in the text so far, decreasing the + model's likelihood to repeat the same line verbatim. + + + [See more information about frequency and presence + penalties.](/docs/guides/text-generation) + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: > + Modify the likelihood of specified tokens appearing in the + completion. 
+ + + Accepts a JSON object that maps tokens (specified by their token ID + in the GPT tokenizer) to an associated bias value from -100 to 100. + You can use this [tokenizer tool](/tokenizer?view=bpe) to convert + text to token IDs. Mathematically, the bias is added to the logits + generated by the model prior to sampling. The exact effect will vary + per model, but values between -1 and 1 should decrease or increase + likelihood of selection; values like -100 or 100 should result in a + ban or exclusive selection of the relevant token. + + + As an example, you can pass `{"50256": -100}` to prevent the + <|endoftext|> token from being generated. + logprobs: + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: > + Include the log probabilities on the `logprobs` most likely output + tokens, as well the chosen tokens. For example, if `logprobs` is 5, + the API will return a list of the 5 most likely tokens. The API will + always return the `logprob` of the sampled token, so there may be up + to `logprobs+1` elements in the response. + + + The maximum value for `logprobs` is 5. + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: > + The maximum number of [tokens](/tokenizer) that can be generated in + the completion. + + + The token count of your prompt plus `max_tokens` cannot exceed the + model's context length. [Example Python + code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: > + How many completions to generate for each prompt. + + + **Note:** Because this parameter generates many completions, it can + quickly consume your token quota. Use carefully and ensure that you + have reasonable settings for `max_tokens` and `stop`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: > + Number between -2.0 and 2.0. Positive values penalize new tokens + based on whether they appear in the text so far, increasing the + model's likelihood to talk about new topics. + + + [See more information about frequency and presence + penalties.](/docs/guides/text-generation) + seed: + type: integer + minimum: -9223372036854776000 + maximum: 9223372036854776000 + nullable: true + description: > + If specified, our system will make a best effort to sample + deterministically, such that repeated requests with the same `seed` + and parameters should return the same result. + + + Determinism is not guaranteed, and you should refer to the + `system_fingerprint` response parameter to monitor changes in the + backend. + stop: + description: > + Up to 4 sequences where the API will stop generating further tokens. + The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent + as data-only [server-sent + events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: + [DONE]` message. [Example Python + code](https://cookbook.openai.com/examples/how_to_stream_completions). 
+ type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + suffix: + description: | + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. + default: null + nullable: true + type: string + example: test. + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or `temperature` but not both. + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - model + - prompt + CreateCompletionResponse: + type: object + description: > + Represents a completion response from the API. Note: both the streamed + and non-streamed response objects share the same shape (unlike the chat + endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input + prompt. + items: type: object - properties: - object: - enum: - - organization.project.user - type: string - description: 'The object type, which is always `organization.project.user`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - name: - type: string - description: The name of the user - email: - type: string - description: The email address of the user - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - added_at: - type: integer - description: The Unix timestamp (in seconds) of when the project was added. - description: Represents an individual user in a project. - x-oaiMeta: - name: The project user object - example: "{\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n}\n" - ProjectUserListResponse: required: - - object - - data - - first_id - - last_id - - has_more - type: object + - finish_reason + - index + - logprobs + - text properties: - object: - type: string - data: + finish_reason: + type: string + description: > + The reason the model stopped generating tokens. This will be + `stop` if the model hit a natural stop point or a provided + stop sequence, + + `length` if the maximum number of tokens specified in the + request was reached, + + or `content_filter` if content was omitted due to a flag from + our content filters. 
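+      # Illustrative sketch (comment only): a minimal body satisfying CreateCompletionRequest
+      # above. `model` and `prompt` are the only required fields; the remaining values reuse
+      # the schema's own examples.
+      #   {"model": "gpt-3.5-turbo-instruct",
+      #    "prompt": "This is a test.",
+      #    "max_tokens": 16,
+      #    "stop": "\n",
+      #    "logit_bias": {"50256": -100}}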
+ enum: + - stop + - length + - content_filter + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: type: array items: - $ref: '#/components/schemas/ProjectUser' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectUserCreateRequest: - required: - - user_id - - role - type: object - properties: - user_id: - type: string - description: The ID of the user. - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - ProjectUserUpdateRequest: - required: - - role + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + system_fingerprint: + type: string + description: > + This fingerprint represents the backend configuration that the model + runs with. + + + Can be used in conjunction with the `seed` request parameter to + understand when backend changes have been made that might impact + determinism. + object: + type: string + description: The object type, which is always "text_completion" + enum: + - text_completion + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-4-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + CreateEmbeddingRequest: + type: object + additionalProperties: false + properties: + input: + description: > + Input text to embed, encoded as a string or array of tokens. To + embed multiple inputs in a single request, pass an array of strings + or array of token arrays. The input must not exceed the max input + tokens for the model (8192 tokens for `text-embedding-ada-002`), + cannot be an empty string, and any array must be 2048 dimensions or + less. [Example Python + code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + for counting tokens. + example: The quick brown fox jumped over the lazy dog + oneOf: + - type: string + title: string + description: The string that will be turned into an embedding. + default: "" + example: This is a test. + - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: string + default: "" + example: "['This is a test.']" + - type: array + title: array + description: The array of integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + title: array + description: The array of arrays containing integers that will be turned into an + embedding. + minItems: 1 + maxItems: 2048 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true + model: + description: > + ID of the model to use. 
You can use the [List + models](/docs/api-reference/models/list) API to see all of your + available models, or see our [Model overview](/docs/models) for + descriptions of them. + example: text-embedding-3-small + anyOf: + - type: string + - type: string + enum: + - text-embedding-ada-002 + - text-embedding-3-small + - text-embedding-3-large + x-oaiTypeLabel: string + encoding_format: + description: The format to return the embeddings in. Can be either `float` or + [`base64`](https://pypi.org/project/pybase64/). + example: float + default: float + type: string + enum: + - float + - base64 + dimensions: + description: > + The number of dimensions the resulting output embeddings should + have. Only supported in `text-embedding-3` and later models. + type: integer + minimum: 1 + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - model + - input + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "list". + enum: + - list + usage: + type: object + description: The usage information for the request. + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + required: + - object + - model + - data + - usage + CreateFileRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The File object (not file name) to be uploaded. + type: string + format: binary + purpose: + description: > + The intended purpose of the uploaded file. + + + Use "assistants" for [Assistants](/docs/api-reference/assistants) + and [Message](/docs/api-reference/messages) files, "vision" for + Assistants image file inputs, "batch" for [Batch + API](/docs/guides/batch), and "fine-tune" for + [Fine-tuning](/docs/api-reference/fine-tuning). + type: string + enum: + - assistants + - batch + - fine-tune + - vision + required: + - file + - purpose + CreateFineTuningJobRequest: + type: object + properties: + model: + description: > + The name of the model to fine-tune. You can select one of the + + [supported + models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned). + example: gpt-4o-mini + anyOf: + - type: string + - type: string + enum: + - babbage-002 + - davinci-002 + - gpt-3.5-turbo + - gpt-4o-mini + x-oaiTypeLabel: string + training_file: + description: > + The ID of an uploaded file that contains training data. + + + See [upload file](/docs/api-reference/files/create) for how to + upload a file. + + + Your dataset must be formatted as a JSONL file. Additionally, you + must upload your file with the purpose `fine-tune`. + + + The contents of the file should differ depending on if the model + uses the [chat](/docs/api-reference/fine-tuning/chat-input) or + [completions](/docs/api-reference/fine-tuning/completions-input) + format. + + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more + details. 
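+      # Illustrative sketch (comment only): a minimal CreateEmbeddingRequest body built from
+      # the example values listed above; `model` and `input` are the required fields.
+      #   {"model": "text-embedding-3-small",
+      #    "input": "The quick brown fox jumped over the lazy dog",
+      #    "encoding_format": "float"}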
+ type: string + example: file-abc123 + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: > + Number of examples in each batch. A larger batch size means that + model parameters + + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: + - auto + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: > + Scaling factor for the learning rate. A smaller learning rate + may be useful to avoid + + overfitting. + oneOf: + - type: string + enum: + - auto + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: > + The number of epochs to train the model for. An epoch refers to + one full cycle + + through the training dataset. + oneOf: + - type: string + enum: + - auto + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: > + A string of up to 64 characters that will be added to your + fine-tuned model name. + + + For example, a `suffix` of "custom-model-name" would produce a model + name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 64 + default: null + nullable: true + validation_file: + description: > + The ID of an uploaded file that contains validation data. + + + If you provide this file, the data is used to generate validation + + metrics periodically during fine-tuning. These metrics can be viewed + in + + the fine-tuning results file. + + The same data should not be present in both train and validation + files. + + + Your dataset must be formatted as a JSONL file. You must upload your + file with the purpose `fine-tune`. + + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more + details. + type: string + nullable: true + example: file-abc123 + integrations: + type: array + description: A list of integrations to enable for your fine-tuning job. + nullable: true + items: type: object - properties: - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - ProjectUserDeleteResponse: required: - - object - - id - - deleted - type: object + - type + - wandb properties: - object: + type: + description: > + The type of integration to enable. Currently, only "wandb" + (Weights and Biases) is supported. + oneOf: + - type: string enum: - - organization.project.user.deleted + - wandb + wandb: + type: object + description: > + The settings for your integration with Weights and Biases. + This payload specifies the project that + + metrics will be sent to. Optionally, you can set an explicit + display name for your run, add tags + + to your run, and set a default entity (team, username, etc) to + be associated with your run. + required: + - project + properties: + project: + description: > + The name of the project that the new run will be created + under. + type: string + example: my-wandb-project + name: + description: > + A display name to set for the run. If not set, we will use + the Job ID as the name. + nullable: true type: string - id: + entity: + description: > + The entity to use for the run. This allows you to set the + team or username of the WandB user that you would + + like associated with the run. If not set, the default + entity for the registered WandB API key is used. 
+ nullable: true type: string - deleted: - type: boolean - ProjectServiceAccount: - required: - - object - - id - - name - - role - - created_at + tags: + description: > + A list of tags to be attached to the newly created run. + These tags are passed through directly to WandB. Some + + default tags are generated by OpenAI: "openai/finetune", + "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: custom-tag + seed: + description: > + The seed controls the reproducibility of the job. Passing in the + same seed and job parameters should produce the same results, but + may differ in rare cases. + + If a seed is not specified, one will be generated for you. + type: integer + nullable: true + minimum: 0 + maximum: 2147483647 + example: 42 + required: + - model + - training_file + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and + square. If mask is not provided, image must have transparency, which + will be used as the mask. + type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is + 1000 characters. + type: string + example: A cute baby sea otter wearing a beret + mask: + description: An additional image whose fully transparent areas (e.g. where alpha + is zero) indicate where `image` should be edited. Must be a valid + PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + x-oaiTypeLabel: string + default: dall-e-2 + example: dall-e-2 + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported + at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + default: 1024x1024 + example: 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, + `512x512`, or `1024x1024`. + response_format: + type: string + enum: + - url + - b64_json + default: url + example: url + nullable: true + description: The format in which the generated images are returned. Must be one + of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - prompt + - image + CreateImageRequest: + type: object + properties: + prompt: + description: A text description of the desired image(s). The maximum length is + 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + type: string + example: A cute baby sea otter + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + - dall-e-3 + x-oaiTypeLabel: string + default: dall-e-2 + example: dall-e-3 + nullable: true + description: The model to use for image generation. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For + `dall-e-3`, only `n=1` is supported. 
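+      # Illustrative sketch (comment only): a CreateFineTuningJobRequest body assembled from
+      # the example values above; only `model` and `training_file` are required, the other
+      # fields are optional and shown purely for illustration.
+      #   {"model": "gpt-4o-mini",
+      #    "training_file": "file-abc123",
+      #    "hyperparameters": {"n_epochs": "auto"},
+      #    "suffix": "custom-model-name",
+      #    "seed": 42,
+      #    "integrations": [{"type": "wandb", "wandb": {"project": "my-wandb-project"}}]}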
+ quality: + type: string + enum: + - standard + - hd + default: standard + example: standard + description: The quality of the image that will be generated. `hd` creates + images with finer details and greater consistency across the image. + This param is only supported for `dall-e-3`. + response_format: + type: string + enum: + - url + - b64_json + default: url + example: url + nullable: true + description: The format in which the generated images are returned. Must be one + of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + - 1792x1024 + - 1024x1792 + default: 1024x1024 + example: 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, + `512x512`, or `1024x1024` for `dall-e-2`. Must be one of + `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: + type: string + enum: + - vivid + - natural + default: vivid + example: vivid + nullable: true + description: The style of the generated images. Must be one of `vivid` or + `natural`. Vivid causes the model to lean towards generating + hyper-real and dramatic images. Natural causes the model to produce + more natural, less hyper-real looking images. This param is only + supported for `dall-e-3`. + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - prompt + CreateImageVariationRequest: + type: object + properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid + PNG file, less than 4MB, and square. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: + - dall-e-2 + x-oaiTypeLabel: string + default: dall-e-2 + example: dall-e-2 + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported + at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For + `dall-e-3`, only `n=1` is supported. + response_format: + type: string + enum: + - url + - b64_json + default: url + example: url + nullable: true + description: The format in which the generated images are returned. Must be one + of `url` or `b64_json`. URLs are only valid for 60 minutes after the + image has been generated. + size: + type: string + enum: + - 256x256 + - 512x512 + - 1024x1024 + default: 1024x1024 + example: 1024x1024 + nullable: true + description: The size of the generated images. Must be one of `256x256`, + `512x512`, or `1024x1024`. + user: + type: string + example: user-1234 + description: > + A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. [Learn + more](/docs/guides/safety-best-practices#end-user-ids). + required: + - image + CreateMessageRequest: + type: object + additionalProperties: false + required: + - role + - content + properties: + role: + type: string + enum: + - user + - assistant + description: > + The role of the entity that is creating the message. Allowed values + include: + + - `user`: Indicates the message is sent by an actual user and should + be used in most cases to represent user-generated messages. + + - `assistant`: Indicates the message is generated by the assistant. 
+ Use this value to insert messages from the assistant into the + conversation. + content: + oneOf: + - type: string + description: The text contents of the message. + title: Text content + - type: array + description: An array of content parts with a defined type, each can be of type + `text` or images can be passed with `image_url` or `image_file`. + Image types are only supported on [Vision-compatible + models](/docs/models). + title: Array of content parts + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageRequestContentTextObject" + x-oaiExpandable: true + minItems: 1 + x-oaiExpandable: true + attachments: + type: array + items: type: object properties: - object: - enum: - - organization.project.service_account - type: string - description: 'The object type, which is always `organization.project.service_account`' - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - name: - type: string - description: The name of the service account - role: - enum: - - owner - - member - type: string - description: '`owner` or `member`' - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the service account was created - description: Represents an individual service account in a project. - x-oaiMeta: - name: The project service account object - example: "{\n \"object\": \"organization.project.service_account\",\n \"id\": \"svc_acct_abc\",\n \"name\": \"Service Account\",\n \"role\": \"owner\",\n \"created_at\": 1711471533\n}\n" - ProjectServiceAccountListResponse: - required: - - object - - data - - first_id - - last_id - - has_more + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they should + be added to. + required: + - file_id + - tools + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + CreateModerationRequest: + type: object + properties: + input: + description: > + Input (or inputs) to classify. Can be a single string, an array of + strings, or + + an array of multi-modal input objects similar to other models. + oneOf: + - type: string + description: A string of text to classify for moderation. + default: "" + example: I want to kill them. + - type: array + description: An array of strings to classify for moderation. + items: + type: string + default: "" + example: I want to kill them. + - type: array + description: An array of multi-modal inputs to the moderation model. + items: + x-oaiExpandable: true + oneOf: + - type: object + description: An object describing an image to classify. + properties: + type: + description: Always `image_url`. + type: string + enum: + - image_url + image_url: + type: object + description: Contains either an image URL or a data URL for a base64 encoded + image. 
+ properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + example: https://example.com/image.jpg + required: + - url + required: + - type + - image_url + - type: object + description: An object describing text to classify. + properties: + type: + description: Always `text`. + type: string + enum: + - text + text: + description: A string of text to classify. + type: string + example: I want to kill them + required: + - type + - text + x-oaiExpandable: true + model: + description: | + The content moderation model you would like to use. Learn more in + [the moderation guide](/docs/guides/moderation), and learn about + available models [here](/docs/models#moderation). + nullable: false + default: omni-moderation-latest + example: omni-moderation-2024-09-26 + anyOf: + - type: string + - type: string + enum: + - omni-moderation-latest + - omni-moderation-2024-09-26 + - text-moderation-latest + - text-moderation-stable + x-oaiTypeLabel: string + required: + - input + CreateModerationResponse: + type: object + description: Represents if a given text input is potentially harmful. + properties: + id: + type: string + description: The unique identifier for the moderation request. + model: + type: string + description: The model used to generate the moderation results. + results: + type: array + description: A list of moderation objects. + items: type: object properties: - object: - enum: - - list - type: string - data: + flagged: + type: boolean + description: Whether any of the below categories are flagged. + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, + gender, ethnicity, religion, nationality, sexual + orientation, disability status, or caste. Hateful content + aimed at non-protected groups (e.g., chess players) is + harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards + the targeted group based on race, gender, ethnicity, + religion, nationality, sexual orientation, disability + status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language + towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm + towards any target. + illicit: + type: boolean + description: Content that includes instructions or advice that facilitate the + planning or execution of wrongdoing, or that gives advice + or instruction on how to commit illicit acts. For example, + "how to shoplift" would fit this category. + illicit/violent: + type: boolean + description: Content that includes instructions or advice that facilitate the + planning or execution of wrongdoing that also includes + violence, or that gives advice or instruction on the + procurement of any weapon. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, + such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or + intend to engage in acts of self-harm, such as suicide, + cutting, and eating disorders. 
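+      # Illustrative sketch (comment only): a multi-modal CreateModerationRequest body as
+      # described earlier in this schema group; only `input` is required, and the values
+      # below reuse the examples given above.
+      #   {"model": "omni-moderation-latest",
+      #    "input": [
+      #      {"type": "text", "text": "I want to kill them"},
+      #      {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}}
+      #    ]}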
+ self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as + suicide, cutting, and eating disorders, or that gives + instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description + of sexual activity, or that promotes sexual services + (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years + old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic + detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - illicit + - illicit/violent + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by + model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + illicit: + type: number + description: The score for the category 'illicit'. + illicit/violent: + type: number + description: The score for the category 'illicit/violent'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - illicit + - illicit/violent + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_applied_input_types: + type: object + description: A list of the categories along with the input type(s) that the + score applies to. + properties: + hate: type: array + description: The applied input type(s) for the category 'hate'. items: - $ref: '#/components/schemas/ProjectServiceAccount' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectServiceAccountCreateRequest: - required: - - name - type: object - properties: - name: - type: string - description: The name of the service account being created. - ProjectServiceAccountCreateResponse: + type: string + enum: + - text + hate/threatening: + type: array + description: The applied input type(s) for the category 'hate/threatening'. + items: + type: string + enum: + - text + harassment: + type: array + description: The applied input type(s) for the category 'harassment'. 
+ items: + type: string + enum: + - text + harassment/threatening: + type: array + description: The applied input type(s) for the category + 'harassment/threatening'. + items: + type: string + enum: + - text + illicit: + type: array + description: The applied input type(s) for the category 'illicit'. + items: + type: string + enum: + - text + illicit/violent: + type: array + description: The applied input type(s) for the category 'illicit/violent'. + items: + type: string + enum: + - text + self-harm: + type: array + description: The applied input type(s) for the category 'self-harm'. + items: + type: string + enum: + - text + - image + self-harm/intent: + type: array + description: The applied input type(s) for the category 'self-harm/intent'. + items: + type: string + enum: + - text + - image + self-harm/instructions: + type: array + description: The applied input type(s) for the category + 'self-harm/instructions'. + items: + type: string + enum: + - text + - image + sexual: + type: array + description: The applied input type(s) for the category 'sexual'. + items: + type: string + enum: + - text + - image + sexual/minors: + type: array + description: The applied input type(s) for the category 'sexual/minors'. + items: + type: string + enum: + - text + violence: + type: array + description: The applied input type(s) for the category 'violence'. + items: + type: string + enum: + - text + - image + violence/graphic: + type: array + description: The applied input type(s) for the category 'violence/graphic'. + items: + type: string + enum: + - text + - image + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - illicit + - illicit/violent + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic required: - - object - - id - - name - - role - - created_at - - api_key + - flagged + - categories + - category_scores + - category_applied_input_types + required: + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: | + { + "id": "modr-0d9740456c391e43c445bf0f010940c7", + "model": "omni-moderation-latest", + "results": [ + { + "flagged": true, + "categories": { + "harassment": true, + "harassment/threatening": true, + "sexual": false, + "hate": false, + "hate/threatening": false, + "illicit": false, + "illicit/violent": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "self-harm": false, + "sexual/minors": false, + "violence": true, + "violence/graphic": true + }, + "category_scores": { + "harassment": 0.8189693396524255, + "harassment/threatening": 0.804985420696006, + "sexual": 1.573112165348997e-6, + "hate": 0.007562942636942845, + "hate/threatening": 0.004208854591835476, + "illicit": 0.030535955153511665, + "illicit/violent": 0.008925306722380033, + "self-harm/intent": 0.00023023930975076432, + "self-harm/instructions": 0.0002293869201073356, + "self-harm": 0.012598046106750154, + "sexual/minors": 2.212566909570261e-8, + "violence": 0.9999992735124786, + "violence/graphic": 0.843064871157054 + }, + "category_applied_input_types": { + "harassment": [ + "text" + ], + "harassment/threatening": [ + "text" + ], + "sexual": [ + "text", + "image" + ], + "hate": [ + "text" + ], + "hate/threatening": [ + "text" + ], + "illicit": [ + "text" + ], + "illicit/violent": [ + "text" + ], + "self-harm/intent": [ + "text", + "image" + ], + "self-harm/instructions": [ + "text", + "image" + ], + "self-harm": [ + "text", + "image" + ], + "sexual/minors": [ + 
"text" + ], + "violence": [ + "text", + "image" + ], + "violence/graphic": [ + "text", + "image" + ] + } + } + ] + } + CreateRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to + execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to + execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated + with the assistant will be used. + example: gpt-4o + anyOf: + - type: string + - type: string + enum: + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 + x-oaiTypeLabel: string + nullable: true + instructions: + description: Overrides the + [instructions](/docs/api-reference/assistants/createAssistant) of + the assistant. This is useful for modifying the behavior on a + per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for + the run. This is useful for modifying the behavior on a per-run + basis without overriding other instructions. + type: string + nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is + useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. 
+ stream: + type: boolean + nullable: true + description: > + If `true`, returns a stream of events that happen during the Run as + server-sent events, terminating when the Run enters a terminal state + with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: > + The maximum number of prompt tokens that may be used over the course + of the run. The run will make a best effort to use only the number + of prompt tokens specified, across multiple turns of the run. If the + run exceeds the number of prompt tokens specified, the run will end + with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: > + The maximum number of completion tokens that may be used over the + course of the run. The run will make a best effort to use only the + number of completion tokens specified, across multiple turns of the + run. If the run exceeds the number of completion tokens specified, + the run will end with status `incomplete`. See `incomplete_details` + for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - assistant_id + CreateSpeechRequest: + type: object + additionalProperties: false + properties: + model: + description: > + One of the available [TTS models](/docs/models#tts): `tts-1` or + `tts-1-hd` + anyOf: + - type: string + - type: string + enum: + - tts-1 + - tts-1-hd + x-oaiTypeLabel: string + input: + type: string + description: The text to generate audio for. The maximum length is 4096 + characters. + maxLength: 4096 + voice: + description: The voice to use when generating the audio. Supported voices are + `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of + the voices are available in the [Text to speech + guide](/docs/guides/text-to-speech#voice-options). + type: string + enum: + - alloy + - echo + - fable + - onyx + - nova + - shimmer + response_format: + description: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, + `flac`, `wav`, and `pcm`. + default: mp3 + type: string + enum: + - mp3 + - opus + - aac + - flac + - wav + - pcm + speed: + description: The speed of the generated audio. Select a value from `0.25` to + `4.0`. `1.0` is the default. + type: number + default: 1 + minimum: 0.25 + maximum: 4 + required: + - model + - input + - voice + CreateThreadAndRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to + execute this run. + type: string + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to + execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated + with the assistant will be used. 
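+      # Illustrative sketch (comment only): a CreateSpeechRequest body; `model`, `input`, and
+      # `voice` are required, and the input text here is a placeholder.
+      #   {"model": "tts-1",
+      #    "input": "Example text to convert to speech.",
+      #    "voice": "alloy",
+      #    "response_format": "mp3",
+      #    "speed": 1.0}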
+ example: gpt-4o + anyOf: + - type: string + - type: string + enum: + - gpt-4o + - gpt-4o-2024-11-20 + - gpt-4o-2024-08-06 + - gpt-4o-2024-05-13 + - gpt-4o-mini + - gpt-4o-mini-2024-07-18 + - gpt-4-turbo + - gpt-4-turbo-2024-04-09 + - gpt-4-0125-preview + - gpt-4-turbo-preview + - gpt-4-1106-preview + - gpt-4-vision-preview + - gpt-4 + - gpt-4-0314 + - gpt-4-0613 + - gpt-4-32k + - gpt-4-32k-0314 + - gpt-4-32k-0613 + - gpt-3.5-turbo + - gpt-3.5-turbo-16k + - gpt-3.5-turbo-0613 + - gpt-3.5-turbo-1106 + - gpt-3.5-turbo-0125 + - gpt-3.5-turbo-16k-0613 + x-oaiTypeLabel: string + nullable: true + instructions: + description: Override the default system message of the assistant. This is + useful for modifying the behavior on a per-run basis. + type: string + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is + useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + tool_resources: + type: object + description: > + A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the + `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The ID of the [vector + store](/docs/api-reference/vector-stores/object) attached to + this assistant. There can be a maximum of 1 vector store + attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. + stream: + type: boolean + nullable: true + description: > + If `true`, returns a stream of events that happen during the Run as + server-sent events, terminating when the Run enters a terminal state + with a `data: [DONE]` message. 
+ max_prompt_tokens: + type: integer + nullable: true + description: > + The maximum number of prompt tokens that may be used over the course + of the run. The run will make a best effort to use only the number + of prompt tokens specified, across multiple turns of the run. If the + run exceeds the number of prompt tokens specified, the run will end + with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: > + The maximum number of completion tokens that may be used over the + course of the run. The run will make a best effort to use only the + number of completion tokens specified, across multiple turns of the + run. If the run exceeds the number of completion tokens specified, + the run will end with status `incomplete`. See `incomplete_details` + for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - assistant_id + CreateThreadRequest: + type: object + additionalProperties: false + properties: + messages: + description: A list of [messages](/docs/api-reference/messages) to start the + thread with. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + tool_resources: + type: object + description: > + A set of resources that are made available to the assistant's tools + in this thread. The resources are specific to the type of tool. For + example, the `code_interpreter` tool requires a list of file IDs, + while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector + store attached to the thread. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: > + A helper to create a [vector + store](/docs/api-reference/vector-stores/object) with + file_ids and attach it to this thread. There can be a + maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs to add + to the vector store. There can be a maximum of 10000 + files in a vector store. + maxItems: 10000 + items: + type: string + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will + use the `auto` strategy. + oneOf: + - type: object + title: Auto Chunking Strategy + description: The default strategy. This strategy currently uses a + `max_chunk_size_tokens` of `800` and + `chunk_overlap_tokens` of `400`. + additionalProperties: false + properties: + type: + type: string + description: Always `auto`. 
+ enum: + - auto + required: + - type + - type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: + - static + static: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is + `800`. The minimum value is `100` and the + maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: > + The number of tokens that overlap between + chunks. The default value is `400`. + + + Note that the overlap must not exceed half + of `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + required: + - type + - static + x-oaiExpandable: true + metadata: + type: object + description: > + Set of 16 key-value pairs that can be attached to a + vector store. This can be useful for storing + additional information about the vector store in a + structured format. Keys can be a maximum of 64 + characters long and values can be a maximum of 512 + characters long. + x-oaiTypeLabel: map + x-oaiExpandable: true + oneOf: + - required: + - vector_store_ids + - required: + - vector_stores + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + CreateTranscriptionRequest: + type: object + additionalProperties: false + properties: + file: + description: > + The audio file object (not file name) to transcribe, in one of these + formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary + model: + description: > + ID of the model to use. Only `whisper-1` (which is powered by our + open source Whisper V2 model) is currently available. + example: whisper-1 + anyOf: + - type: string + - type: string + enum: + - whisper-1 + x-oaiTypeLabel: string + language: + description: > + The language of the input audio. Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) + format will improve accuracy and latency. + type: string + prompt: + description: > + An optional text to guide the model's style or continue a previous + audio segment. The [prompt](/docs/guides/speech-to-text#prompting) + should match the audio language. + type: string + response_format: + $ref: "#/components/schemas/AudioResponseFormat" + temperature: + description: > + The sampling temperature, between 0 and 1. Higher values like 0.8 + will make the output more random, while lower values like 0.2 will + make it more focused and deterministic. If set to 0, the model will + use [log probability](https://en.wikipedia.org/wiki/Log_probability) + to automatically increase the temperature until certain thresholds + are hit. + type: number + default: 0 + timestamp_granularities[]: + description: > + The timestamp granularities to populate for this transcription. + `response_format` must be set `verbose_json` to use timestamp + granularities. Either or both of these options are supported: + `word`, or `segment`. Note: There is no additional latency for + segment timestamps, but generating word timestamps incurs additional + latency. 
+          type: array
+          items:
+            type: string
+            enum:
+              - word
+              - segment
+          default:
+            - segment
+      required:
+        - file
+        - model
+    CreateTranscriptionResponseJson:
+      type: object
+      description: Represents a transcription response returned by the model, based on the
+        provided input.
+      properties:
+        text:
+          type: string
+          description: The transcribed text.
+      required:
+        - text
+      x-oaiMeta:
+        name: The transcription object (JSON)
+        group: audio
+        example: >
+          {
+            "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that."
+          }
+    CreateTranscriptionResponseVerboseJson:
+      type: object
+      description: Represents a verbose JSON transcription response returned by the model,
+        based on the provided input.
+      properties:
+        language:
+          type: string
+          description: The language of the input audio.
+        duration:
+          type: string
+          description: The duration of the input audio.
+        text:
+          type: string
+          description: The transcribed text.
+        words:
+          type: array
+          description: Extracted words and their corresponding timestamps.
+          items:
+            $ref: "#/components/schemas/TranscriptionWord"
+        segments:
+          type: array
+          description: Segments of the transcribed text and their corresponding details.
+          items:
+            $ref: "#/components/schemas/TranscriptionSegment"
+      required:
+        - language
+        - duration
+        - text
+      x-oaiMeta:
+        name: The transcription object (Verbose JSON)
+        group: audio
+        example: >
+          {
+            "task": "transcribe",
+            "language": "english",
+            "duration": 8.470000267028809,
+            "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.",
+            "segments": [
+              {
+                "id": 0,
+                "seek": 0,
+                "start": 0.0,
+                "end": 3.319999933242798,
+                "text": " The beach was a popular spot on a hot summer day.",
+                "tokens": [
+                  50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530
+                ],
+                "temperature": 0.0,
+                "avg_logprob": -0.2860786020755768,
+                "compression_ratio": 1.2363636493682861,
+                "no_speech_prob": 0.00985979475080967
+              },
+              ...
+            ]
+          }
+    CreateTranslationRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        file:
+          description: >
+            The audio file object (not file name) to translate, in one of these
+            formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+          type: string
+          x-oaiTypeLabel: file
+          format: binary
+        model:
+          description: >
+            ID of the model to use. Only `whisper-1` (which is powered by our
+            open source Whisper V2 model) is currently available.
+          example: whisper-1
+          anyOf:
+            - type: string
+            - type: string
+              enum:
+                - whisper-1
+          x-oaiTypeLabel: string
+        prompt:
+          description: >
+            An optional text to guide the model's style or continue a previous
+            audio segment. The [prompt](/docs/guides/speech-to-text#prompting)
+            should be in English.
+          type: string
+        response_format:
+          $ref: "#/components/schemas/AudioResponseFormat"
+        temperature:
+          description: >
+            The sampling temperature, between 0 and 1. Higher values like 0.8
+            will make the output more random, while lower values like 0.2 will
+            make it more focused and deterministic. If set to 0, the model will
+            use [log probability](https://en.wikipedia.org/wiki/Log_probability)
+            to automatically increase the temperature until certain thresholds
+            are hit.
+ type: number + default: 0 + required: + - file + - model + CreateTranslationResponseJson: + type: object + properties: + text: + type: string + required: + - text + CreateTranslationResponseVerboseJson: + type: object + properties: + language: + type: string + description: The language of the output translation (always `english`). + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The translated text. + segments: + type: array + description: Segments of the translated text and their corresponding details. + items: + $ref: "#/components/schemas/TranscriptionSegment" + required: + - language + - duration + - text + CreateUploadRequest: + type: object + additionalProperties: false + properties: + filename: + description: | + The name of the file to upload. + type: string + purpose: + description: > + The intended purpose of the uploaded file. + + + See the [documentation on File + purposes](/docs/api-reference/files/create#files-create-purpose). + type: string + enum: + - assistants + - batch + - fine-tune + - vision + bytes: + description: | + The number of bytes in the file you are uploading. + type: integer + mime_type: + description: > + The MIME type of the file. + + + This must fall within the supported MIME types for your file + purpose. See the supported MIME types for assistants and vision. + type: string + required: + - filename + - purpose + - bytes + - mime_type + CreateVectorStoreFileBatchRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector + store should use. Useful for tools like `file_search` that can + access files. + type: array + minItems: 1 + maxItems: 500 + items: + type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_ids + CreateVectorStoreFileRequest: + type: object + additionalProperties: false + properties: + file_id: + description: A [File](/docs/api-reference/files) ID that the vector store should + use. Useful for tools like `file_search` that can access files. + type: string + chunking_strategy: + $ref: "#/components/schemas/ChunkingStrategyRequestParam" + required: + - file_id + CreateVectorStoreRequest: + type: object + additionalProperties: false + properties: + file_ids: + description: A list of [File](/docs/api-reference/files) IDs that the vector + store should use. Useful for tools like `file_search` that can + access files. + type: array + maxItems: 500 + items: + type: string + name: + description: The name of the vector store. + type: string + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + chunking_strategy: + type: object + description: The chunking strategy used to chunk the file(s). If not set, will + use the `auto` strategy. Only applicable if `file_ids` is non-empty. + oneOf: + - $ref: "#/components/schemas/AutoChunkingStrategyRequestParam" + - $ref: "#/components/schemas/StaticChunkingStrategyRequestParam" + x-oaiExpandable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. 
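+      # Illustrative request body sketch (comment only, not part of the schema); the
+      # file ID and metadata values below are invented:
+      #   { "name": "Support FAQ", "file_ids": ["file-abc123"], "metadata": { "team": "support" } }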
+ type: object + x-oaiTypeLabel: map + nullable: true + DefaultProjectErrorResponse: + type: object + properties: + code: + type: integer + message: + type: string + required: + - code + - message + DeleteAssistantResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - assistant.deleted + required: + - id + - object + - deleted + DeleteFileResponse: + type: object + properties: + id: + type: string + object: + type: string + enum: + - file + deleted: + type: boolean + required: + - id + - object + - deleted + DeleteMessageResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - thread.message.deleted + required: + - id + - object + - deleted + DeleteModelResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + required: + - id + - object + - deleted + DeleteThreadResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - thread.deleted + required: + - id + - object + - deleted + DeleteVectorStoreFileResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - vector_store.file.deleted + required: + - id + - object + - deleted + DeleteVectorStoreResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: + - vector_store.deleted + required: + - id + - object + - deleted + DoneEvent: + type: object + properties: + event: + type: string + enum: + - done + data: + type: string + enum: + - "[DONE]" + required: + - event + - data + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" + Embedding: + type: object + description: | + Represents an embedding vector returned by embedding endpoint. + properties: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array + description: > + The embedding vector, which is a list of floats. The length of + vector depends on the model as listed in the [embedding + guide](/docs/guides/embeddings). + items: + type: number + object: + type: string + description: The object type, which is always "embedding". + enum: + - embedding + required: + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + type: + type: string + nullable: false + required: + - type + - message + - param + - code + ErrorEvent: + type: object + properties: + event: + type: string + enum: + - error + data: + $ref: "#/components/schemas/Error" + required: + - event + - data + description: Occurs when an [error](/docs/guides/error-codes#api-errors) occurs. + This can happen due to an internal server error or a timeout. + x-oaiMeta: + dataDescription: "`data` is an [error](/docs/guides/error-codes#api-errors)" + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error + FileSearchRankingOptions: + title: File search tool call ranking options + type: object + description: > + The ranking options for the file search. 
If not specified, the file + search tool will use the `auto` ranker and a score_threshold of 0. + + + See the [file search tool + documentation](/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + properties: + ranker: + type: string + description: The ranker to use for the file search. If not specified will use + the `auto` ranker. + enum: + - auto + - default_2024_08_21 + score_threshold: + type: number + description: The score threshold for the file search. All values must be a + floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - score_threshold + FineTuneChatCompletionRequestAssistantMessage: + allOf: + - type: object + title: Assistant message + deprecated: false + properties: + weight: + type: integer + enum: + - 0 + - 1 + description: Controls whether the assistant message is trained against (0 or 1) + - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage" + required: + - role + FineTuningIntegration: + type: object + title: Fine-Tuning Job Integration + required: + - type + - wandb + properties: + type: + type: string + description: The type of the integration being enabled for the fine-tuning job + enum: + - wandb + wandb: + type: object + description: > + The settings for your integration with Weights and Biases. This + payload specifies the project that + + metrics will be sent to. Optionally, you can set an explicit display + name for your run, add tags + + to your run, and set a default entity (team, username, etc) to be + associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: my-wandb-project + name: + description: > + A display name to set for the run. If not set, we will use the + Job ID as the name. + nullable: true + type: string + entity: + description: > + The entity to use for the run. This allows you to set the team + or username of the WandB user that you would + + like associated with the run. If not set, the default entity for + the registered WandB API key is used. + nullable: true + type: string + tags: + description: > + A list of tags to be attached to the newly created run. These + tags are passed through directly to WandB. Some + + default tags are generated by OpenAI: "openai/finetune", + "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: custom-tag + FineTuningJob: + type: object + title: FineTuningJob + description: > + The `fine_tuning.job` object represents a fine-tuning job that has been + created through the API. + properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was + created. + error: + type: object + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more + information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or + `validation_file`. This field will be null if the failure was + not parameter-specific. 
+ nullable: true + required: + - code + - message + - param + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. The value + will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was + finished. The value will be null if the fine-tuning job is still + running. + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. See the + [fine-tuning guide](/docs/guides/fine-tuning) for more details. + properties: + n_epochs: + oneOf: + - type: string + enum: + - auto + - type: integer + minimum: 1 + maximum: 50 + default: auto + description: >- + The number of epochs to train the model for. An epoch refers to + one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of + the dataset. If setting the number manually, we support any + number between 1 and 50 epochs. + required: + - n_epochs + model: + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + enum: + - fine_tuning.job + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results file ID(s) for the fine-tuning job. You can + retrieve the results with the [Files + API](/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either + `validating_files`, `queued`, `running`, `succeeded`, `failed`, or + `cancelled`. + enum: + - validating_files + - queued + - running + - succeeded + - failed + - cancelled + trained_tokens: + type: integer + nullable: true + description: The total number of billable tokens processed by this fine-tuning + job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data + with the [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string + nullable: true + description: The file ID used for validation. You can retrieve the validation + results with the [Files + API](/docs/api-reference/files/retrieve-contents). + integrations: + type: array + nullable: true + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 + items: + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true + seed: + type: integer + description: The seed used for the fine-tuning job. + estimated_finish: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job is + estimated to finish. The value will be null if the fine-tuning job + is not running. 
+ required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id + - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + - seed + x-oaiMeta: + name: The fine-tuning job object + example: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "davinci-002", + "created_at": 1692661014, + "finished_at": 1692661190, + "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", + "organization_id": "org-123", + "result_files": [ + "file-abc123" + ], + "status": "succeeded", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": { + "n_epochs": 4, + "batch_size": 1, + "learning_rate_multiplier": 1.0 + }, + "trained_tokens": 5768, + "integrations": [], + "seed": 0, + "estimated_finish": 0 + } + FineTuningJobCheckpoint: + type: object + title: FineTuningJobCheckpoint + description: > + The `fine_tuning.job.checkpoint` object represents a model checkpoint + for a fine-tuning job that is ready to use. + properties: + id: + type: string + description: The checkpoint identifier, which can be referenced in the API + endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: + type: object + description: Metrics at the step number during the fine-tuning job. + properties: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: + type: string + description: The name of the fine-tuning job that this checkpoint was created + from. + object: + type: string + description: The object type, which is always "fine_tuning.job.checkpoint". 
+          enum:
+            - fine_tuning.job.checkpoint
+      required:
+        - created_at
+        - fine_tuning_job_id
+        - fine_tuned_model_checkpoint
+        - id
+        - metrics
+        - object
+        - step_number
+      x-oaiMeta:
+        name: The fine-tuning job checkpoint object
+        example: >
+          {
+            "object": "fine_tuning.job.checkpoint",
+            "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P",
+            "created_at": 1712211699,
+            "fine_tuned_model_checkpoint": "ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88",
+            "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN",
+            "metrics": {
+              "step": 88,
+              "train_loss": 0.478,
+              "train_mean_token_accuracy": 0.924,
+              "valid_loss": 10.112,
+              "valid_mean_token_accuracy": 0.145,
+              "full_valid_loss": 0.567,
+              "full_valid_mean_token_accuracy": 0.944
+            },
+            "step_number": 88
+          }
+    FineTuningJobEvent:
+      type: object
+      description: Fine-tuning job event object
+      properties:
+        id:
+          type: string
+        created_at:
+          type: integer
+        level:
+          type: string
+          enum:
+            - info
+            - warn
+            - error
+        message:
+          type: string
+        object:
+          type: string
+          enum:
+            - fine_tuning.job.event
+      required:
+        - id
+        - object
+        - created_at
+        - level
+        - message
+      x-oaiMeta:
+        name: The fine-tuning job event object
+        example: |
+          {
+            "object": "fine_tuning.job.event",
+            "id": "ftevent-abc123",
+            "created_at": 1677610602,
+            "level": "info",
+            "message": "Created fine-tuning job"
+          }
+    FinetuneChatRequestInput:
+      type: object
+      description: The per-line training example of a fine-tuning input file for chat models
+      properties:
+        messages:
+          type: array
+          minItems: 1
+          items:
+            oneOf:
+              - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage"
+              - $ref: "#/components/schemas/ChatCompletionRequestUserMessage"
+              - $ref: "#/components/schemas/FineTuneChatCompletionRequestAssistantMessage"
+              - $ref: "#/components/schemas/ChatCompletionRequestToolMessage"
+              - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage"
+            x-oaiExpandable: true
+        tools:
+          type: array
+          description: A list of tools the model may generate JSON inputs for.
+          items:
+            $ref: "#/components/schemas/ChatCompletionTool"
+        parallel_tool_calls:
+          $ref: "#/components/schemas/ParallelToolCalls"
+        functions:
+          deprecated: true
+          description: A list of functions the model may generate JSON inputs for.
+          type: array
+          minItems: 1
+          maxItems: 128
+          items:
+            $ref: "#/components/schemas/ChatCompletionFunctions"
+      x-oaiMeta:
+        name: Training format for chat models
+        example: >
+          {
+            "messages": [
+              { "role": "user", "content": "What is the weather in San Francisco?" },
+              {
+                "role": "assistant",
+                "tool_calls": [
+                  {
+                    "id": "call_id",
+                    "type": "function",
+                    "function": {
+                      "name": "get_current_weather",
+                      "arguments": "{\"location\": \"San Francisco, USA\", \"format\": \"celsius\"}"
+                    }
+                  }
+                ]
+              }
+            ],
+            "parallel_tool_calls": false,
+            "tools": [
+              {
+                "type": "function",
+                "function": {
+                  "name": "get_current_weather",
+                  "description": "Get the current weather",
+                  "parameters": {
+                    "type": "object",
+                    "properties": {
+                      "location": {
+                        "type": "string",
+                        "description": "The city and country, e.g. San Francisco, USA"
+                      },
+                      "format": { "type": "string", "enum": ["celsius", "fahrenheit"] }
+                    },
+                    "required": ["location", "format"]
+                  }
+                }
+              }
+            ]
+          }
+    FinetuneCompletionRequestInput:
+      type: object
+      description: The per-line training example of a fine-tuning input file for
+        completions models
+      properties:
+        prompt:
+          type: string
+          description: The input prompt for this training example.
+ completion: + type: string + description: The desired completion for this training example. + x-oaiMeta: + name: Training format for completions models + example: | + { + "prompt": "What is the answer to 2+2", + "completion": "4" + } + FunctionObject: + type: object + properties: + description: + type: string + description: A description of what the function does, used by the model to + choose when and how to call the function. + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or + contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/FunctionParameters" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the + function call. If set to true, the model will follow the exact + schema defined in the `parameters` field. Only a subset of JSON + Schema is supported when `strict` is `true`. Learn more about + Structured Outputs in the [function calling + guide](docs/guides/function-calling). + required: + - name + FunctionParameters: + type: object + description: >- + The parameters the functions accepts, described as a JSON Schema object. + See the [guide](/docs/guides/function-calling) for examples, and the + [JSON Schema + reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + + Omitting `parameters` defines a function with an empty parameter list. + additionalProperties: true + Image: + type: object + description: Represents the url or the content of an image generated by the + OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if + `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` + (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any + revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } + ImagesResponse: + properties: + created: + type: integer + data: + type: array + items: + $ref: "#/components/schemas/Image" + required: + - created + - data + Invite: + type: object + description: Represents an individual `invite` to the organization. + properties: + object: + type: string + enum: + - organization.invite + description: The object type, which is always `organization.invite` + id: + type: string + description: The identifier, which can be referenced in API endpoints + email: + type: string + description: The email address of the individual to whom the invite was sent + role: + type: string + enum: + - owner + - reader + description: "`owner` or `reader`" + status: + type: string + enum: + - accepted + - expired + - pending + description: "`accepted`,`expired`, or `pending`" + invited_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was sent. + expires_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite expires. + accepted_at: + type: integer + description: The Unix timestamp (in seconds) of when the invite was accepted. 
+ required: + - object + - id + - email + - role + - status + - invited_at + - expires_at + x-oaiMeta: + name: The invite object + example: | + { + "object": "organization.invite", + "id": "invite-abc", + "email": "user@example.com", + "role": "owner", + "status": "accepted", + "invited_at": 1711471533, + "expires_at": 1711471533, + "accepted_at": 1711471533 + } + InviteDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.invite.deleted + description: The object type, which is always `organization.invite.deleted` + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + InviteListResponse: + type: object + properties: + object: + type: string + enum: + - list + description: The object type, which is always `list` + data: + type: array + items: + $ref: "#/components/schemas/Invite" + first_id: + type: string + description: The first `invite_id` in the retrieved `list` + last_id: + type: string + description: The last `invite_id` in the retrieved `list` + has_more: + type: boolean + description: The `has_more` property is used for pagination to indicate there + are additional results. + required: + - object + - data + InviteRequest: + type: object + properties: + email: + type: string + description: Send an email to this address + role: + type: string + enum: + - reader + - owner + description: "`owner` or `reader`" + required: + - email + - role + ListAssistantsResponse: + type: object + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: + type: string + example: asst_abc123 + last_id: + type: string + example: asst_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: > + { + "object": "list", + "data": [ + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698982736, + "name": "Coding Tutor", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc456", + "object": "assistant", + "created_at": 1698982718, + "name": "My Assistant", + "description": null, + "model": "gpt-4o", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc789", + "object": "assistant", + "created_at": 1698982643, + "name": null, + "description": null, + "model": "gpt-4o", + "instructions": null, + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + ], + "first_id": "asst_abc123", + "last_id": "asst_abc789", + "has_more": false + } + ListAuditLogsResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/AuditLog" + first_id: + type: string + example: audit_log-defb456h8dks + last_id: + type: string + example: audit_log-hnbkd8s93s + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ListBatchesResponse: + type: object + properties: + data: + type: array + items: + $ref: 
"#/components/schemas/Batch" + first_id: + type: string + example: batch_abc123 + last_id: + type: string + example: batch_abc456 + has_more: + type: boolean + object: + type: string + enum: + - list + required: + - object + - data + - has_more + ListFilesResponse: + type: object + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + first_id: + type: string + example: file-abc123 + last_id: + type: string + example: file-abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListFineTuningJobCheckpointsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobCheckpoint" + object: + type: string + enum: + - list + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean + required: + - object + - data + - has_more + ListFineTuningJobEventsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobEvent" + object: + type: string + enum: + - list + required: + - object + - data + ListMessagesResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/MessageObject" + first_id: + type: string + example: msg_abc123 + last_id: + type: string + example: msg_abc123 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListModelsResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + ListPaginatedFineTuningJobsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: + type: string + enum: + - list + required: + - object + - data + - has_more + ListRunStepsResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/RunStepObject" + first_id: + type: string + example: step_abc123 + last_id: + type: string + example: step_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListRunsResponse: + type: object + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/RunObject" + first_id: + type: string + example: run_abc123 + last_id: + type: string + example: run_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListThreadsResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/ThreadObject" + first_id: + type: string + example: asst_abc123 + last_id: + type: string + example: asst_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListVectorStoreFilesResponse: + properties: + object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreFileObject" + first_id: + type: string + example: file-abc123 + last_id: + type: string + example: file-abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ListVectorStoresResponse: + properties: 
+ object: + type: string + example: list + data: + type: array + items: + $ref: "#/components/schemas/VectorStoreObject" + first_id: + type: string + example: vs_abc123 + last_id: + type: string + example: vs_abc456 + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + MessageContentImageFileObject: + title: Image file + type: object + description: References an image [File](/docs/api-reference/files) in the + content of a message. + properties: + type: + description: Always `image_file`. + type: string + enum: + - image_file + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the + message content. Set `purpose="vision"` when uploading the File + if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. + `low` uses fewer tokens, you can opt in to high resolution using + `high`. + enum: + - auto + - low + - high + default: auto + required: + - file_id + required: + - type + - image_file + MessageContentImageUrlObject: + title: Image URL + type: object + description: References an image URL in the content of a message. + properties: + type: + type: string + enum: + - image_url + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: "The external URL of the image, must be a supported image types: + jpeg, jpg, png, gif, webp." + format: uri + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, + you can opt in to high resolution using `high`. Default value is + `auto` + enum: + - auto + - low + - high + default: auto + required: + - url + required: + - type + - image_url + MessageContentRefusalObject: + title: Refusal + type: object + description: The refusal content generated by the assistant. + properties: + type: + description: Always `refusal`. + type: string + enum: + - refusal + refusal: + type: string + nullable: false + required: + - type + - refusal + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from + a specific File associated with the assistant or the message. Generated + when the assistant uses the "file_search" tool to search files. + properties: + type: + description: Always `file_citation`. + type: string + enum: + - file_citation + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_citation + - start_index + - end_index + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the + `code_interpreter` tool to generate a file. + properties: + type: + description: Always `file_path`. + type: string + enum: + - file_path + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. 
+ type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_path + - start_index + - end_index + MessageContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + type: + description: Always `text`. + type: string + enum: + - text + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - value + - annotations + required: + - type + - text + MessageDeltaContentImageFileObject: + title: Image file + type: object + description: References an image [File](/docs/api-reference/files) in the + content of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_file`. + type: string + enum: + - image_file + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the + message content. Set `purpose="vision"` when uploading the File + if you need to later display the file content. + type: string + detail: + type: string + description: Specifies the detail level of the image if specified by the user. + `low` uses fewer tokens, you can opt in to high resolution using + `high`. + enum: + - auto + - low + - high + default: auto + required: + - index + - type + MessageDeltaContentImageUrlObject: + title: Image URL + type: object + description: References an image URL in the content of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_url`. + type: string + enum: + - image_url + image_url: + type: object + properties: + url: + description: "The URL of the image, must be a supported image types: jpeg, jpg, + png, gif, webp." + type: string + detail: + type: string + description: Specifies the detail level of the image. `low` uses fewer tokens, + you can opt in to high resolution using `high`. + enum: + - auto + - low + - high + default: auto + required: + - index + - type + MessageDeltaContentRefusalObject: + title: Refusal + type: object + description: The refusal content that is part of a message. + properties: + index: + type: integer + description: The index of the refusal part in the message. + type: + description: Always `refusal`. + type: string + enum: + - refusal + refusal: + type: string + required: + - index + - type + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from + a specific File associated with the assistant or the message. Generated + when the assistant uses the "file_search" tool to search files. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. + type: string + enum: + - file_citation + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. 
+ type: string + quote: + description: The specific quote in the file. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the + `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. + type: string + enum: + - file_path + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + MessageDeltaContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. + type: string + enum: + - text + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObjec\ + t" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - index + - type + MessageDeltaObject: + type: object + title: Message delta object + description: > + Represents a message delta i.e. any changed fields on a message during + streaming. + properties: + id: + description: The identifier of the message, which can be referenced in API + endpoints. + type: string + object: + description: The object type, which is always `thread.message.delta`. + type: string + enum: + - thread.message.delta + delta: + description: The delta containing the fields that have changed on the Message. + type: object + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: + - user + - assistant + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + - $ref: "#/components/schemas/MessageDeltaContentRefusalObject" + - $ref: "#/components/schemas/MessageDeltaContentImageUrlObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } + MessageObject: + type: object + title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.message`. + type: string + enum: + - thread.message + created_at: + description: The Unix timestamp (in seconds) for when the message was created. 
+ type: integer + thread_id: + description: The [thread](/docs/api-reference/threads) ID that this message + belongs to. + type: string + status: + description: The status of the message, which can be either `in_progress`, + `incomplete`, or `completed`. + type: string + enum: + - in_progress + - incomplete + - completed + incomplete_details: + description: On an incomplete message, details about why the message is + incomplete. + type: object + properties: + reason: + type: string + description: The reason the message is incomplete. + enum: + - content_filter + - max_tokens + - run_cancelled + - run_expired + - run_failed + nullable: true + required: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. + type: integer + nullable: true + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as + incomplete. + type: integer + nullable: true + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: + - user + - assistant + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentImageUrlObject" + - $ref: "#/components/schemas/MessageContentTextObject" + - $ref: "#/components/schemas/MessageContentRefusalObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the + [assistant](/docs/api-reference/assistants) that authored this + message. + type: string + nullable: true + run_id: + description: The ID of the [run](/docs/api-reference/runs) associated with the + creation of this message. Value is `null` when messages are created + manually using the create message or create thread endpoints. + type: string + nullable: true + attachments: + type: array + items: type: object properties: - object: - enum: - - organization.project.service_account - type: string - id: - type: string - name: - type: string - role: - enum: - - member - type: string - description: Service accounts can only have one role of type `member` - created_at: - type: integer - api_key: - $ref: '#/components/schemas/ProjectServiceAccountApiKey' - ProjectServiceAccountApiKey: - required: - - object - - value - - name - - created_at - - id + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearchTypeOnly" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they were + added to. + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. 
+ type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - created_at + - thread_id + - status + - incomplete_details + - completed_at + - incomplete_at + - role + - content + - assistant_id + - run_id + - attachments + - metadata + x-oaiMeta: + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "attachments": [], + "metadata": {} + } + MessageRequestContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + type: + description: Always `text`. + type: string + enum: + - text + text: + type: string + description: Text content to be sent to the model + required: + - type + - text + MessageStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: + - thread.message.created + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is + created. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: + - thread.message.in_progress + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) moves + to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: + - thread.message.delta + data: + $ref: "#/components/schemas/MessageDeltaObject" + required: + - event + - data + description: Occurs when parts of a + [Message](/docs/api-reference/messages/object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [message + delta](/docs/api-reference/assistants-streaming/message-delta-obj\ + ect)" + - type: object + properties: + event: + type: string + enum: + - thread.message.completed + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is + completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: + - thread.message.incomplete + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends + before it is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. + properties: + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. + object: + type: string + description: The object type, which is always "model". + enum: + - model + owned_by: + type: string + description: The organization that owns the model. 
+ required: + - id + - object + - created + - owned_by + x-oaiMeta: + name: The model object + example: | + { + "id": "VAR_chat_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + } + ModifyAssistantRequest: + type: object + additionalProperties: false + properties: + model: + description: > + ID of the model to use. You can use the [List + models](/docs/api-reference/models/list) API to see all of your + available models, or see our [Model overview](/docs/models) for + descriptions of them. + anyOf: + - type: string + name: + description: | + The name of the assistant. The maximum length is 256 characters. + type: string + nullable: true + maxLength: 256 + description: + description: > + The description of the assistant. The maximum length is 512 + characters. + type: string + nullable: true + maxLength: 512 + instructions: + description: > + The system instructions that the assistant uses. The maximum length + is 256,000 characters. + type: string + nullable: true + maxLength: 256000 + tools: + description: > + A list of tool enabled on the assistant. There can be a maximum of + 128 tools per assistant. Tools can be of types `code_interpreter`, + `file_search`, or `function`. + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: > + A set of resources that are used by the assistant's tools. The + resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the + `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + Overrides the list of [file](/docs/api-reference/files) IDs + made available to the `code_interpreter` tool. There can be + a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + Overrides the [vector + store](/docs/api-reference/vector-stores/object) attached to + this assistant. There can be a maximum of 1 vector store + attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: > + What sampling temperature to use, between 0 and 2. Higher values + like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: > + An alternative to sampling with temperature, called nucleus + sampling, where the model considers the results of the tokens with + top_p probability mass. So 0.1 means only the tokens comprising the + top 10% probability mass are considered. + + + We generally recommend altering this or temperature but not both. 
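+      # Illustrative request body sketch (comment only, not part of the schema); adjust
+      # temperature or top_p, not both, per the guidance above:
+      #   { "instructions": "You are a concise coding tutor.", "temperature": 0.2 }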
+ response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + ModifyMessageRequest: + type: object + additionalProperties: false + properties: + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + ModifyRunRequest: + type: object + additionalProperties: false + properties: + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + ModifyThreadRequest: + type: object + additionalProperties: false + properties: + tool_resources: + type: object + description: > + A set of resources that are made available to the assistant's tools + in this thread. The resources are specific to the type of tool. For + example, the `code_interpreter` tool requires a list of file IDs, + while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector + store attached to the thread. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. + properties: + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. + object: + type: string + description: The object type, which is always `file`. + enum: + - file + purpose: + type: string + description: The intended purpose of the file. Supported values are + `assistants`, `assistants_output`, `batch`, `batch_output`, + `fine-tune`, `fine-tune-results` and `vision`. + enum: + - assistants + - assistants_output + - batch + - batch_output + - fine-tune + - fine-tune-results + - vision + status: + type: string + deprecated: true + description: Deprecated. The current status of the file, which can be either + `uploaded`, `processed`, or `error`. + enum: + - uploaded + - processed + - error + status_details: + type: string + deprecated: true + description: Deprecated. 
For details on why a fine-tuning training file failed + validation, see the `error` field on `fine_tuning.job`. + required: + - id + - object + - bytes + - created_at + - filename + - purpose + - status + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + OtherChunkingStrategyResponseParam: + type: object + title: Other Chunking Strategy + description: This is returned when the chunking strategy is unknown. Typically, + this is because the file was indexed before the `chunking_strategy` + concept was introduced in the API. + additionalProperties: false + properties: + type: + type: string + description: Always `other`. + enum: + - other + required: + - type + ParallelToolCalls: + description: Whether to enable [parallel function + calling](/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + type: boolean + default: true + PredictionContent: + type: object + title: Static Content + description: > + Static predicted output content, such as the content of a text file that + is + + being regenerated. + required: + - type + - content + properties: + type: + type: string + enum: + - content + description: | + The type of the predicted content you want to provide. This type is + currently always `content`. + content: + x-oaiExpandable: true + description: > + The content that should be matched when generating a model response. + + If generated tokens would match this content, the entire model + response + + can be returned much more quickly. + oneOf: + - type: string + title: Text content + description: | + The content used for a Predicted Output. This is often the + text of a file you are regenerating with minor changes. + - type: array + description: An array of content parts with a defined type. Supported options + differ based on the [model](/docs/models) being used to generate + the response. Can contain text inputs. + title: Array of content parts + items: + $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + minItems: 1 + Project: + type: object + description: Represents an individual project. + properties: + id: + type: string + description: The identifier, which can be referenced in API endpoints + object: + type: string + enum: + - organization.project + description: The object type, which is always `organization.project` + name: + type: string + description: The name of the project. This appears in reporting. + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was created. + archived_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) of when the project was archived or + `null`. + status: + type: string + enum: + - active + - archived + description: "`active` or `archived`" + required: + - id + - object + - name + - created_at + - status + x-oaiMeta: + name: The project object + example: | + { + "id": "proj_abc", + "object": "organization.project", + "name": "Project example", + "created_at": 1711471533, + "archived_at": null, + "status": "active" + } + ProjectApiKey: + type: object + description: Represents an individual API key in a project. 
+ properties: + object: + type: string + enum: + - organization.project.api_key + description: The object type, which is always `organization.project.api_key` + redacted_value: + type: string + description: The redacted value of the API key + name: + type: string + description: The name of the API key + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the API key was created + id: + type: string + description: The identifier, which can be referenced in API endpoints + owner: + type: object + properties: + type: + type: string + enum: + - user + - service_account + description: "`user` or `service_account`" + user: + $ref: "#/components/schemas/ProjectUser" + service_account: + $ref: "#/components/schemas/ProjectServiceAccount" + required: + - object + - redacted_value + - name + - created_at + - id + - owner + x-oaiMeta: + name: The project API key object + example: | + { + "object": "organization.project.api_key", + "redacted_value": "sk-abc...def", + "name": "My API Key", + "created_at": 1711471533, + "id": "key_abc", + "owner": { + "type": "user", + "user": { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "created_at": 1711471533 + } + } + } + ProjectApiKeyDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.project.api_key.deleted + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + ProjectApiKeyListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/ProjectApiKey" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectCreateRequest: + type: object + properties: + name: + type: string + description: The friendly name of the project, this name appears in reports. + required: + - name + ProjectListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/Project" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectRateLimit: + type: object + description: Represents a project rate limit config. + properties: + object: + type: string + enum: + - project.rate_limit + description: The object type, which is always `project.rate_limit` + id: + type: string + description: The identifier, which can be referenced in API endpoints. + model: + type: string + description: The model this rate limit applies to. + max_requests_per_1_minute: + type: integer + description: The maximum requests per minute. + max_tokens_per_1_minute: + type: integer + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only present for relevant models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only present for relevant + models. + max_requests_per_1_day: + type: integer + description: The maximum requests per day. Only present for relevant models. + batch_1_day_max_input_tokens: + type: integer + description: The maximum batch input tokens per day. Only present for relevant + models. 
+ required: + - object + - id + - model + - max_requests_per_1_minute + - max_tokens_per_1_minute + x-oaiMeta: + name: The project rate limit object + example: | + { + "object": "project.rate_limit", + "id": "rl_ada", + "model": "ada", + "max_requests_per_1_minute": 600, + "max_tokens_per_1_minute": 150000, + "max_images_per_1_minute": 10 + } + ProjectRateLimitListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/ProjectRateLimit" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectRateLimitUpdateRequest: + type: object + properties: + max_requests_per_1_minute: + type: integer + description: The maximum requests per minute. + max_tokens_per_1_minute: + type: integer + description: The maximum tokens per minute. + max_images_per_1_minute: + type: integer + description: The maximum images per minute. Only relevant for certain models. + max_audio_megabytes_per_1_minute: + type: integer + description: The maximum audio megabytes per minute. Only relevant for certain + models. + max_requests_per_1_day: + type: integer + description: The maximum requests per day. Only relevant for certain models. + batch_1_day_max_input_tokens: + type: integer + description: The maximum batch input tokens per day. Only relevant for certain + models. + ProjectServiceAccount: + type: object + description: Represents an individual service account in a project. + properties: + object: + type: string + enum: + - organization.project.service_account + description: The object type, which is always + `organization.project.service_account` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the service account + role: + type: string + enum: + - owner + - member + description: "`owner` or `member`" + created_at: + type: integer + description: The Unix timestamp (in seconds) of when the service account was + created + required: + - object + - id + - name + - role + - created_at + x-oaiMeta: + name: The project service account object + example: | + { + "object": "organization.project.service_account", + "id": "svc_acct_abc", + "name": "Service Account", + "role": "owner", + "created_at": 1711471533 + } + ProjectServiceAccountApiKey: + type: object + properties: + object: + type: string + enum: + - organization.project.service_account.api_key + description: The object type, which is always + `organization.project.service_account.api_key` + value: + type: string + name: + type: string + created_at: + type: integer + id: + type: string + required: + - object + - value + - name + - created_at + - id + ProjectServiceAccountCreateRequest: + type: object + properties: + name: + type: string + description: The name of the service account being created. 
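+      # A minimal sketch of a request body for this schema; the service
+      # account name below is illustrative only:
+      #   { "name": "Backend service" }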
+ required: + - name + ProjectServiceAccountCreateResponse: + type: object + properties: + object: + type: string + enum: + - organization.project.service_account + id: + type: string + name: + type: string + role: + type: string + enum: + - member + description: Service accounts can only have one role of type `member` + created_at: + type: integer + api_key: + $ref: "#/components/schemas/ProjectServiceAccountApiKey" + required: + - object + - id + - name + - role + - created_at + - api_key + ProjectServiceAccountDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.project.service_account.deleted + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + ProjectServiceAccountListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/ProjectServiceAccount" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectUpdateRequest: + type: object + properties: + name: + type: string + description: The updated name of the project, this name appears in reports. + required: + - name + ProjectUser: + type: object + description: Represents an individual user in a project. + properties: + object: + type: string + enum: + - organization.project.user + description: The object type, which is always `organization.project.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: + - owner + - member + description: "`owner` or `member`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the project was added. + required: + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The project user object + example: | + { + "object": "organization.project.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + ProjectUserCreateRequest: + type: object + properties: + user_id: + type: string + description: The ID of the user. + role: + type: string + enum: + - owner + - member + description: "`owner` or `member`" + required: + - user_id + - role + ProjectUserDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.project.user.deleted + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + ProjectUserListResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: "#/components/schemas/ProjectUser" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + ProjectUserUpdateRequest: + type: object + properties: + role: + type: string + enum: + - owner + - member + description: "`owner` or `member`" + required: + - role + RealtimeClientEventConversationItemCreate: + type: object + description: > + Add a new Item to the Conversation's context, including messages, + function + + calls, and function call responses. This event can be used both to + populate a + + "history" of the conversation and to add new items mid-stream, but has + the + + current limitation that it cannot populate assistant audio messages. 
+ + + If successful, the server will respond with a + `conversation.item.created` + + event, otherwise an `error` event will be sent. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - conversation.item.create + description: The event type, must be `conversation.item.create`. + previous_item_id: + type: string + description: > + The ID of the preceding item after which the new item will be + inserted. + + If not set, the new item will be appended to the end of the + conversation. + + If set, it allows an item to be inserted mid-conversation. If the + ID + + cannot be found, an error will be returned and the item will not be + added. + item: + $ref: "#/components/schemas/RealtimeConversationItem" + required: + - type + - item + x-oaiMeta: + name: conversation.item.create + group: realtime + example: | + { + "event_id": "event_345", + "type": "conversation.item.create", + "previous_item_id": null, + "item": { + "id": "msg_001", + "type": "message", + "role": "user", + "content": [ + { + "type": "input_text", + "text": "Hello, how are you?" + } + ] + } + } + RealtimeClientEventConversationItemDelete: + type: object + description: > + Send this event when you want to remove any item from the conversation + + history. The server will respond with a `conversation.item.deleted` + event, + + unless the item does not exist in the conversation history, in which + case the + + server will respond with an error. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - conversation.item.delete + description: The event type, must be `conversation.item.delete`. + item_id: + type: string + description: The ID of the item to delete. + required: + - type + - item_id + x-oaiMeta: + name: conversation.item.delete + group: realtime + example: | + { + "event_id": "event_901", + "type": "conversation.item.delete", + "item_id": "msg_003" + } + RealtimeClientEventConversationItemTruncate: + type: object + description: > + Send this event to truncate a previous assistant message’s audio. The + server + + will produce audio faster than realtime, so this event is useful when + the user + + interrupts to truncate audio that has already been sent to the client + but not + + yet played. This will synchronize the server's understanding of the + audio with + + the client's playback. + + + Truncating audio will delete the server-side text transcript to ensure + there + + is not text in the context that hasn't been heard by the user. + + + If successful, the server will respond with a + `conversation.item.truncated` + + event. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - conversation.item.truncate + description: The event type, must be `conversation.item.truncate`. + item_id: + type: string + description: > + The ID of the assistant message item to truncate. Only assistant + message + + items can be truncated. + content_index: + type: integer + description: The index of the content part to truncate. Set this to 0. + audio_end_ms: + type: integer + description: > + Inclusive duration up to which audio is truncated, in milliseconds. + If + + the audio_end_ms is greater than the actual audio duration, the + server + + will respond with an error. 
+ required: + - type + - item_id + - content_index + - audio_end_ms + x-oaiMeta: + name: conversation.item.truncate + group: realtime + example: | + { + "event_id": "event_678", + "type": "conversation.item.truncate", + "item_id": "msg_002", + "content_index": 0, + "audio_end_ms": 1500 + } + RealtimeClientEventInputAudioBufferAppend: + type: object + description: > + Send this event to append audio bytes to the input audio buffer. The + audio + + buffer is temporary storage you can write to and later commit. In Server + VAD + + mode, the audio buffer is used to detect speech and the server will + decide + + when to commit. When Server VAD is disabled, you must commit the audio + buffer + + manually. + + + The client may choose how much audio to place in each event up to a + maximum + + of 15 MiB, for example streaming smaller chunks from the client may + allow the + + VAD to be more responsive. Unlike made other client events, the server + will + + not send a confirmation response to this event. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - input_audio_buffer.append + description: The event type, must be `input_audio_buffer.append`. + audio: + type: string + description: > + Base64-encoded audio bytes. This must be in the format specified by + the + + `input_audio_format` field in the session configuration. + required: + - type + - audio + x-oaiMeta: + name: input_audio_buffer.append + group: realtime + example: | + { + "event_id": "event_456", + "type": "input_audio_buffer.append", + "audio": "Base64EncodedAudioData" + } + RealtimeClientEventInputAudioBufferClear: + type: object + description: | + Send this event to clear the audio bytes in the buffer. The server will + respond with an `input_audio_buffer.cleared` event. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - input_audio_buffer.clear + description: The event type, must be `input_audio_buffer.clear`. + required: + - type + x-oaiMeta: + name: input_audio_buffer.clear + group: realtime + example: | + { + "event_id": "event_012", + "type": "input_audio_buffer.clear" + } + RealtimeClientEventInputAudioBufferCommit: + type: object + description: > + Send this event to commit the user input audio buffer, which will create + a + + new user message item in the conversation. This event will produce an + error + + if the input audio buffer is empty. When in Server VAD mode, the client + does + + not need to send this event, the server will commit the audio buffer + + automatically. + + + Committing the input audio buffer will trigger input audio + transcription + + (if enabled in session configuration), but it will not create a + response + + from the model. The server will respond with an + `input_audio_buffer.committed` + + event. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - input_audio_buffer.commit + description: The event type, must be `input_audio_buffer.commit`. + required: + - type + x-oaiMeta: + name: input_audio_buffer.commit + group: realtime + example: | + { + "event_id": "event_789", + "type": "input_audio_buffer.commit" + } + RealtimeClientEventResponseCancel: + type: object + description: > + Send this event to cancel an in-progress response. 
The server will + respond + + with a `response.cancelled` event or an error if there is no response + to + + cancel. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - response.cancel + description: The event type, must be `response.cancel`. + required: + - type + x-oaiMeta: + name: response.cancel + group: realtime + example: | + { + "event_id": "event_567", + "type": "response.cancel" + } + RealtimeClientEventResponseCreate: + type: object + description: > + This event instructs the server to create a Response, which means + triggering + + model inference. When in Server VAD mode, the server will create + Responses + + automatically. + + + A Response will include at least one Item, and may have two, in which + case + + the second will be a function call. These Items will be appended to the + + conversation history. + + + The server will respond with a `response.created` event, events for + Items + + and content created, and finally a `response.done` event to indicate + the + + Response is complete. + + + The `response.create` event includes inference configuration like + + `instructions`, and `temperature`. These fields will override the + Session's + + configuration for this Response only. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - response.create + description: The event type, must be `response.create`. + response: + $ref: "#/components/schemas/RealtimeSession" + required: + - type + - response + x-oaiMeta: + name: response.create + group: realtime + example: | + { + "event_id": "event_234", + "type": "response.create", + "response": { + "modalities": ["text", "audio"], + "instructions": "Please assist the user.", + "voice": "sage", + "output_audio_format": "pcm16", + "tools": [ + { + "type": "function", + "name": "calculate_sum", + "description": "Calculates the sum of two numbers.", + "parameters": { + "type": "object", + "properties": { + "a": { "type": "number" }, + "b": { "type": "number" } + }, + "required": ["a", "b"] + } + } + ], + "tool_choice": "auto", + "temperature": 0.7, + "max_output_tokens": 150 + } + } + RealtimeClientEventSessionUpdate: + type: object + description: > + Send this event to update the session’s default configuration. The + client may + + send this event at any time to update the session configuration, and + any + + field may be updated at any time, except for "voice". The server will + respond + + with a `session.updated` event that shows the full effective + configuration. + + Only fields that are present are updated, thus the correct way to clear + a + + field like "instructions" is to pass an empty string. + properties: + event_id: + type: string + description: Optional client-generated ID used to identify this event. + type: + type: string + enum: + - session.update + description: The event type, must be `session.update`. 
+ session: + $ref: "#/components/schemas/RealtimeSession" + required: + - type + - session + x-oaiMeta: + name: session.update + group: realtime + example: | + { + "event_id": "event_123", + "type": "session.update", + "session": { + "modalities": ["text", "audio"], + "instructions": "You are a helpful assistant.", + "voice": "sage", + "input_audio_format": "pcm16", + "output_audio_format": "pcm16", + "input_audio_transcription": { + "model": "whisper-1" + }, + "turn_detection": { + "type": "server_vad", + "threshold": 0.5, + "prefix_padding_ms": 300, + "silence_duration_ms": 500 + }, + "tools": [ + { + "type": "function", + "name": "get_weather", + "description": "Get the current weather...", + "parameters": { + "type": "object", + "properties": { + "location": { "type": "string" } + }, + "required": ["location"] + } + } + ], + "tool_choice": "auto", + "temperature": 0.8, + "max_response_output_tokens": "inf" + } + } + RealtimeConversationItem: + type: object + x-oaiExpandable: true + description: The item to add to the conversation. + properties: + id: + type: string + description: > + The unique ID of the item, this can be generated by the client to + help + + manage server-side context, but is not required because the server + will + + generate one if not provided. + type: + type: string + enum: + - message + - function_call + - function_call_output + description: > + The type of the item (`message`, `function_call`, + `function_call_output`). + object: + type: string + enum: + - realtime.item + description: > + Identifier for the API object being returned - always + `realtime.item`. + status: + type: string + enum: + - completed + - incomplete + description: > + The status of the item (`completed`, `incomplete`). These have no + effect + + on the conversation, but are accepted for consistency with the + + `conversation.item.created` event. + role: + type: string + enum: + - user + - assistant + - systems + description: > + The role of the message sender (`user`, `assistant`, `system`), + only + + applicable for `message` items. + content: + type: array + x-oaiExpandable: true + description: > + The content of the message, applicable for `message` items. + + - Message items of role `system` support only `input_text` content + + - Message items of role `user` support `input_text` and + `input_audio` + content + - Message items of role `assistant` support `text` content. + items: type: object + x-oaiExpandable: true properties: - object: - enum: - - organization.project.service_account.api_key - type: string - description: 'The object type, which is always `organization.project.service_account.api_key`' - value: - type: string - name: - type: string - created_at: - type: integer - id: - type: string - ProjectServiceAccountDeleteResponse: - required: - - object - - id - - deleted + type: + type: string + enum: + - input_audio + - input_text + - text + description: The content type (`input_text`, `input_audio`, `text`). + text: + type: string + description: > + The text content, used for `input_text` and `text` content + types. + audio: + type: string + description: > + Base64-encoded audio bytes, used for `input_audio` content + type. + transcript: + type: string + description: > + The transcript of the audio, used for `input_audio` content + type. + call_id: + type: string + description: > + The ID of the function call (for `function_call` and + + `function_call_output` items). 
If passed on a + `function_call_output` + + item, the server will check that a `function_call` item with the + same + + ID exists in the conversation history. + name: + type: string + description: | + The name of the function being called (for `function_call` items). + arguments: + type: string + description: | + The arguments of the function call (for `function_call` items). + output: + type: string + description: | + The output of the function call (for `function_call_output` items). + RealtimeResponse: + type: object + description: The response resource. + properties: + id: + type: string + description: The unique ID of the response. + object: + type: string + enum: + - realtime.response + description: The object type, must be `realtime.response`. + status: + type: string + enum: + - completed + - cancelled + - failed + - incomplete + description: > + The final status of the response (`completed`, `cancelled`, + `failed`, or + + `incomplete`). + status_details: + type: object + description: Additional details about the status. + properties: + type: + type: string + enum: + - completed + - cancelled + - failed + - incomplete + description: > + The type of error that caused the response to fail, + corresponding + + with the `status` field (`cancelled`, `incomplete`, `failed`). + reason: + type: string + enum: + - turn_detected + - client_cancelled + - max_output_tokens + - content_filter + description: > + The reason the Response did not complete. For a `cancelled` + Response, + + one of `turn_detected` (the server VAD detected a new start of + speech) + + or `client_cancelled` (the client sent a cancel event). For an + + `incomplete` Response, one of `max_output_tokens` or + `content_filter` + + (the server-side safety filter activated and cut off the + response). + error: + type: object + description: | + A description of the error that caused the response to fail, + populated when the `status` is `failed`. + properties: + type: + type: string + description: The type of error. + code: + type: string + description: Error code, if any. + output: + type: array + description: The list of output items generated by the response. + items: + $ref: "#/components/schemas/RealtimeConversationItem" + usage: + type: object + description: > + Usage statistics for the Response, this will correspond to billing. + A + + Realtime API session will maintain a conversation context and append + new + + Items to the Conversation, thus output from previous turns (text + and + + audio tokens) will become the input for later turns. + properties: + total_tokens: + type: integer + description: > + The total number of tokens in the Response including input and + output + + text and audio tokens. + input_tokens: + type: integer + description: > + The number of input tokens used in the Response, including text + and + + audio tokens. + output_tokens: + type: integer + description: > + The number of output tokens sent in the Response, including text + and + + audio tokens. + input_token_details: + type: object + description: Details about the input tokens used in the Response. + properties: + cached_tokens: + type: integer + description: The number of cached tokens used in the Response. + text_tokens: + type: integer + description: The number of text tokens used in the Response. + audio_tokens: + type: integer + description: The number of audio tokens used in the Response. + output_token_details: + type: object + description: Details about the output tokens used in the Response. 
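+          # For orientation, the counts relate as the `response.done` example
+          # later in this spec illustrates: input text_tokens (119) +
+          # audio_tokens (8) = input_tokens (127), output text_tokens (36) +
+          # audio_tokens (112) = output_tokens (148), and input_tokens +
+          # output_tokens = total_tokens (275).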
+ properties: + text_tokens: + type: integer + description: The number of text tokens used in the Response. + audio_tokens: + type: integer + description: The number of audio tokens used in the Response. + RealtimeServerEventConversationCreated: + type: object + description: > + Returned when a conversation is created. Emitted right after session + creation. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.created + description: The event type, must be `conversation.created`. + conversation: + type: object + description: The conversation resource. + properties: + id: + type: string + description: The unique ID of the conversation. + object: + type: string + description: The object type, must be `realtime.conversation`. + required: + - event_id + - type + - conversation + x-oaiMeta: + name: conversation.created + group: realtime + example: | + { + "event_id": "event_9101", + "type": "conversation.created", + "conversation": { + "id": "conv_001", + "object": "realtime.conversation" + } + } + RealtimeServerEventConversationItemCreated: + type: object + description: > + Returned when a conversation item is created. There are several + scenarios that + + produce this event: + - The server is generating a Response, which if successful will produce + either one or two Items, which will be of type `message` + (role `assistant`) or type `function_call`. + - The input audio buffer has been committed, either by the client or the + server (in `server_vad` mode). The server will take the content of the + input audio buffer and add it to a new user message Item. + - The client has sent a `conversation.item.create` event to add a new Item + to the Conversation. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.created + description: The event type, must be `conversation.item.created`. + previous_item_id: + type: string + description: > + The ID of the preceding item in the Conversation context, allows + the + + client to understand the order of the conversation. + item: + $ref: "#/components/schemas/RealtimeConversationItem" + required: + - event_id + - type + - previous_item_id + - item + x-oaiMeta: + name: conversation.item.created + group: realtime + example: | + { + "event_id": "event_1920", + "type": "conversation.item.created", + "previous_item_id": "msg_002", + "item": { + "id": "msg_003", + "object": "realtime.item", + "type": "message", + "status": "completed", + "role": "user", + "content": [ + { + "type": "input_audio", + "transcript": "hello how are you", + "audio": "base64encodedaudio==" + } + ] + } + } + RealtimeServerEventConversationItemDeleted: + type: object + description: > + Returned when an item in the conversation is deleted by the client with + a + + `conversation.item.delete` event. This event is used to synchronize the + + server's understanding of the conversation history with the client's + view. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.deleted + description: The event type, must be `conversation.item.deleted`. + item_id: + type: string + description: The ID of the item that was deleted. 
+ required: + - event_id + - type + - item_id + x-oaiMeta: + name: conversation.item.deleted + group: realtime + example: | + { + "event_id": "event_2728", + "type": "conversation.item.deleted", + "item_id": "msg_005" + } + RealtimeServerEventConversationItemInputAudioTranscriptionCompleted: + type: object + description: > + This event is the output of audio transcription for user audio written + to the + + user audio buffer. Transcription begins when the input audio buffer is + + committed by the client or server (in `server_vad` mode). Transcription + runs + + asynchronously with Response creation, so this event may come before or + after + + the Response events. + + + Realtime API models accept audio natively, and thus input transcription + is a + + separate process run on a separate ASR (Automatic Speech Recognition) + model, + + currently always `whisper-1`. Thus the transcript may diverge somewhat + from + + the model's interpretation, and should be treated as a rough guide. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.input_audio_transcription.completed + description: | + The event type, must be + `conversation.item.input_audio_transcription.completed`. + item_id: + type: string + description: The ID of the user message item containing the audio. + content_index: + type: integer + description: The index of the content part containing the audio. + transcript: + type: string + description: The transcribed text. + required: + - event_id + - type + - item_id + - content_index + - transcript + x-oaiMeta: + name: conversation.item.input_audio_transcription.completed + group: realtime + example: | + { + "event_id": "event_2122", + "type": "conversation.item.input_audio_transcription.completed", + "item_id": "msg_003", + "content_index": 0, + "transcript": "Hello, how are you?" + } + RealtimeServerEventConversationItemInputAudioTranscriptionFailed: + type: object + description: > + Returned when input audio transcription is configured, and a + transcription + + request for a user message failed. These events are separate from other + + `error` events so that the client can identify the related Item. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.input_audio_transcription.failed + description: | + The event type, must be + `conversation.item.input_audio_transcription.failed`. + item_id: + type: string + description: The ID of the user message item. + content_index: + type: integer + description: The index of the content part containing the audio. + error: + type: object + description: Details of the transcription error. + properties: + type: + type: string + description: The type of error. + code: + type: string + description: Error code, if any. + message: + type: string + description: A human-readable error message. + param: + type: string + description: Parameter related to the error, if any. 
+ required: + - event_id + - type + - item_id + - content_index + - error + x-oaiMeta: + name: conversation.item.input_audio_transcription.failed + group: realtime + example: | + { + "event_id": "event_2324", + "type": "conversation.item.input_audio_transcription.failed", + "item_id": "msg_003", + "content_index": 0, + "error": { + "type": "transcription_error", + "code": "audio_unintelligible", + "message": "The audio could not be transcribed.", + "param": null + } + } + RealtimeServerEventConversationItemTruncated: + type: object + description: > + Returned when an earlier assistant audio message item is truncated by + the + + client with a `conversation.item.truncate` event. This event is used to + + synchronize the server's understanding of the audio with the client's + playback. + + + This action will truncate the audio and remove the server-side text + transcript + + to ensure there is no text in the context that hasn't been heard by the + user. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - conversation.item.truncated + description: The event type, must be `conversation.item.truncated`. + item_id: + type: string + description: The ID of the assistant message item that was truncated. + content_index: + type: integer + description: The index of the content part that was truncated. + audio_end_ms: + type: integer + description: | + The duration up to which the audio was truncated, in milliseconds. + required: + - event_id + - type + - item_id + - content_index + - audio_end_ms + x-oaiMeta: + name: conversation.item.truncated + group: realtime + example: | + { + "event_id": "event_2526", + "type": "conversation.item.truncated", + "item_id": "msg_004", + "content_index": 0, + "audio_end_ms": 1500 + } + RealtimeServerEventError: + type: object + description: > + Returned when an error occurs, which could be a client problem or a + server + + problem. Most errors are recoverable and the session will stay open, we + + recommend to implementors to monitor and log error messages by default. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - error + description: The event type, must be `error`. + error: + type: object + description: Details of the error. + properties: + type: + type: string + description: > + The type of error (e.g., "invalid_request_error", + "server_error"). + code: + type: string + description: Error code, if any. + message: + type: string + description: A human-readable error message. + param: + type: string + description: Parameter related to the error, if any. + event_id: + type: string + description: > + The event_id of the client event that caused the error, if + applicable. + required: + - event_id + - type + - error + x-oaiMeta: + name: error + group: realtime + example: | + { + "event_id": "event_890", + "type": "error", + "error": { + "type": "invalid_request_error", + "code": "invalid_event", + "message": "The 'type' field is missing.", + "param": null, + "event_id": "event_567" + } + } + RealtimeServerEventInputAudioBufferCleared: + type: object + description: | + Returned when the input audio buffer is cleared by the client with a + `input_audio_buffer.clear` event. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - input_audio_buffer.cleared + description: The event type, must be `input_audio_buffer.cleared`. 
+ required: + - event_id + - type + x-oaiMeta: + name: input_audio_buffer.cleared + group: realtime + example: | + { + "event_id": "event_1314", + "type": "input_audio_buffer.cleared" + } + RealtimeServerEventInputAudioBufferCommitted: + type: object + description: > + Returned when an input audio buffer is committed, either by the client + or + + automatically in server VAD mode. The `item_id` property is the ID of + the user + + message item that will be created, thus a `conversation.item.created` + event + + will also be sent to the client. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - input_audio_buffer.committed + description: The event type, must be `input_audio_buffer.committed`. + previous_item_id: + type: string + description: > + The ID of the preceding item after which the new item will be + inserted. + item_id: + type: string + description: The ID of the user message item that will be created. + required: + - event_id + - type + - previous_item_id + - item_id + x-oaiMeta: + name: input_audio_buffer.committed + group: realtime + example: | + { + "event_id": "event_1121", + "type": "input_audio_buffer.committed", + "previous_item_id": "msg_001", + "item_id": "msg_002" + } + RealtimeServerEventInputAudioBufferSpeechStarted: + type: object + description: > + Sent by the server when in `server_vad` mode to indicate that speech has + been + + detected in the audio buffer. This can happen any time audio is added to + the + + buffer (unless speech is already detected). The client may want to use + this + + event to interrupt audio playback or provide visual feedback to the + user. + + + The client should expect to receive a + `input_audio_buffer.speech_stopped` event + + when speech stops. The `item_id` property is the ID of the user message + item + + that will be created when speech stops and will also be included in the + + `input_audio_buffer.speech_stopped` event (unless the client manually + commits + + the audio buffer during VAD activation). + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - input_audio_buffer.speech_started + description: The event type, must be `input_audio_buffer.speech_started`. + audio_start_ms: + type: integer + description: > + Milliseconds from the start of all audio written to the buffer + during the + + session when speech was first detected. This will correspond to the + + beginning of audio sent to the model, and thus includes the + + `prefix_padding_ms` configured in the Session. + item_id: + type: string + description: > + The ID of the user message item that will be created when speech + stops. + required: + - event_id + - type + - audio_start_ms + - item_id + x-oaiMeta: + name: input_audio_buffer.speech_started + group: realtime + example: | + { + "event_id": "event_1516", + "type": "input_audio_buffer.speech_started", + "audio_start_ms": 1000, + "item_id": "msg_003" + } + RealtimeServerEventInputAudioBufferSpeechStopped: + type: object + description: > + Returned in `server_vad` mode when the server detects the end of speech + in + + the audio buffer. The server will also send an + `conversation.item.created` + + event with the user message item that is created from the audio buffer. + properties: + event_id: + type: string + description: The unique ID of the server event. 
+ type: + type: string + enum: + - input_audio_buffer.speech_stopped + description: The event type, must be `input_audio_buffer.speech_stopped`. + audio_end_ms: + type: integer + description: > + Milliseconds since the session started when speech stopped. This + will + + correspond to the end of audio sent to the model, and thus includes + the + + `min_silence_duration_ms` configured in the Session. + item_id: + type: string + description: The ID of the user message item that will be created. + required: + - event_id + - type + - audio_end_ms + - item_id + x-oaiMeta: + name: input_audio_buffer.speech_stopped + group: realtime + example: | + { + "event_id": "event_1718", + "type": "input_audio_buffer.speech_stopped", + "audio_end_ms": 2000, + "item_id": "msg_003" + } + RealtimeServerEventRateLimitsUpdated: + type: object + description: > + Emitted at the beginning of a Response to indicate the updated rate + limits. + + When a Response is created some tokens will be "reserved" for the + output + + tokens, the rate limits shown here reflect that reservation, which is + then + + adjusted accordingly once the Response is completed. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - rate_limits.updated + description: The event type, must be `rate_limits.updated`. + rate_limits: + type: array + description: List of rate limit information. + items: type: object properties: - object: - enum: - - organization.project.service_account.deleted - type: string - id: - type: string - deleted: - type: boolean - ProjectApiKey: - required: - - object - - redacted_value - - name - - created_at - - id - - owner + name: + type: string + description: The name of the rate limit (`requests`, `tokens`). + limit: + type: integer + description: The maximum allowed value for the rate limit. + remaining: + type: integer + description: The remaining value before the limit is reached. + reset_seconds: + type: number + description: Seconds until the rate limit resets. + required: + - event_id + - type + - rate_limits + x-oaiMeta: + name: rate_limits.updated + group: realtime + example: | + { + "event_id": "event_5758", + "type": "rate_limits.updated", + "rate_limits": [ + { + "name": "requests", + "limit": 1000, + "remaining": 999, + "reset_seconds": 60 + }, + { + "name": "tokens", + "limit": 50000, + "remaining": 49950, + "reset_seconds": 60 + } + ] + } + RealtimeServerEventResponseAudioDelta: + type: object + description: Returned when the model-generated audio is updated. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.audio.delta + description: The event type, must be `response.audio.delta`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: Base64-encoded audio data delta. 
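+      # Each delta is a base64-encoded audio chunk in the session's configured
+      # `output_audio_format`; a client would typically decode the chunks and
+      # append them, in order, to its playback buffer.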
+ required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta + x-oaiMeta: + name: response.audio.delta + group: realtime + example: | + { + "event_id": "event_4950", + "type": "response.audio.delta", + "response_id": "resp_001", + "item_id": "msg_008", + "output_index": 0, + "content_index": 0, + "delta": "Base64EncodedAudioDelta" + } + RealtimeServerEventResponseAudioDone: + type: object + description: > + Returned when the model-generated audio is done. Also emitted when a + Response + + is interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.audio.done + description: The event type, must be `response.audio.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + x-oaiMeta: + name: response.audio.done + group: realtime + example: | + { + "event_id": "event_5152", + "type": "response.audio.done", + "response_id": "resp_001", + "item_id": "msg_008", + "output_index": 0, + "content_index": 0 + } + RealtimeServerEventResponseAudioTranscriptDelta: + type: object + description: > + Returned when the model-generated transcription of audio output is + updated. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.audio_transcript.delta + description: The event type, must be `response.audio_transcript.delta`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: The transcript delta. + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta + x-oaiMeta: + name: response.audio_transcript.delta + group: realtime + example: | + { + "event_id": "event_4546", + "type": "response.audio_transcript.delta", + "response_id": "resp_001", + "item_id": "msg_008", + "output_index": 0, + "content_index": 0, + "delta": "Hello, how can I a" + } + RealtimeServerEventResponseAudioTranscriptDone: + type: object + description: | + Returned when the model-generated transcription of audio output is done + streaming. Also emitted when a Response is interrupted, incomplete, or + cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.audio_transcript.done + description: The event type, must be `response.audio_transcript.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + transcript: + type: string + description: The final transcript of the audio. 
+ required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - transcript + x-oaiMeta: + name: response.audio_transcript.done + group: realtime + example: | + { + "event_id": "event_4748", + "type": "response.audio_transcript.done", + "response_id": "resp_001", + "item_id": "msg_008", + "output_index": 0, + "content_index": 0, + "transcript": "Hello, how can I assist you today?" + } + RealtimeServerEventResponseContentPartAdded: + type: object + description: > + Returned when a new content part is added to an assistant message item + during + + response generation. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.content_part.added + description: The event type, must be `response.content_part.added`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item to which the content part was added. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + part: + type: object + description: The content part that was added. + properties: + type: + type: string + enum: + - audio + - text + description: The content type ("text", "audio"). + text: + type: string + description: The text content (if type is "text"). + audio: + type: string + description: Base64-encoded audio data (if type is "audio"). + transcript: + type: string + description: The transcript of the audio (if type is "audio"). + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - part + x-oaiMeta: + name: response.content_part.added + group: realtime + example: | + { + "event_id": "event_3738", + "type": "response.content_part.added", + "response_id": "resp_001", + "item_id": "msg_007", + "output_index": 0, + "content_index": 0, + "part": { + "type": "text", + "text": "" + } + } + RealtimeServerEventResponseContentPartDone: + type: object + description: > + Returned when a content part is done streaming in an assistant message + item. + + Also emitted when a Response is interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.content_part.done + description: The event type, must be `response.content_part.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + part: + type: object + description: The content part that is done. + properties: + type: + type: string + description: The content type ("text", "audio"). + text: + type: string + description: The text content (if type is "text"). + audio: + type: string + description: Base64-encoded audio data (if type is "audio"). + transcript: + type: string + description: The transcript of the audio (if type is "audio"). 
+ required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - part + x-oaiMeta: + name: response.content_part.done + group: realtime + example: | + { + "event_id": "event_3940", + "type": "response.content_part.done", + "response_id": "resp_001", + "item_id": "msg_007", + "output_index": 0, + "content_index": 0, + "part": { + "type": "text", + "text": "Sure, I can help with that." + } + } + RealtimeServerEventResponseCreated: + type: object + description: > + Returned when a new Response is created. The first event of response + creation, + + where the response is in an initial state of `in_progress`. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.created + description: The event type, must be `response.created`. + response: + $ref: "#/components/schemas/RealtimeResponse" + required: + - event_id + - type + - response + x-oaiMeta: + name: response.created + group: realtime + example: | + { + "event_id": "event_2930", + "type": "response.created", + "response": { + "id": "resp_001", + "object": "realtime.response", + "status": "in_progress", + "status_details": null, + "output": [], + "usage": null + } + } + RealtimeServerEventResponseDone: + type: object + description: > + Returned when a Response is done streaming. Always emitted, no matter + the + + final state. The Response object included in the `response.done` event + will + + include all output Items in the Response but will omit the raw audio + data. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.done + description: The event type, must be `response.done`. + response: + $ref: "#/components/schemas/RealtimeResponse" + required: + - event_id + - type + - response + x-oaiMeta: + name: response.done + group: realtime + example: | + { + "event_id": "event_3132", + "type": "response.done", + "response": { + "id": "resp_001", + "object": "realtime.response", + "status": "completed", + "status_details": null, + "output": [ + { + "id": "msg_006", + "object": "realtime.item", + "type": "message", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "text", + "text": "Sure, how can I assist you today?" + } + ] + } + ], + "usage": { + "total_tokens":275, + "input_tokens":127, + "output_tokens":148, + "input_token_details": { + "cached_tokens":384, + "text_tokens":119, + "audio_tokens":8, + "cached_tokens_details": { + "text_tokens": 128, + "audio_tokens": 256 + } + }, + "output_token_details": { + "text_tokens":36, + "audio_tokens":112 + } + } + } + } + RealtimeServerEventResponseFunctionCallArgumentsDelta: + type: object + description: | + Returned when the model-generated function call arguments are updated. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.function_call_arguments.delta + description: | + The event type, must be `response.function_call_arguments.delta`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the function call item. + output_index: + type: integer + description: The index of the output item in the response. + call_id: + type: string + description: The ID of the function call. + delta: + type: string + description: The arguments delta as a JSON string. 
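+      # Deltas are fragments of the final arguments JSON (see the partial
+      # fragment in the example below), so a client would typically concatenate
+      # them in order and parse only the complete string delivered by the
+      # `response.function_call_arguments.done` event.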
+ required: + - event_id + - type + - response_id + - item_id + - output_index + - call_id + - delta + x-oaiMeta: + name: response.function_call_arguments.delta + group: realtime + example: | + { + "event_id": "event_5354", + "type": "response.function_call_arguments.delta", + "response_id": "resp_002", + "item_id": "fc_001", + "output_index": 0, + "call_id": "call_001", + "delta": "{\"location\": \"San\"" + } + RealtimeServerEventResponseFunctionCallArgumentsDone: + type: object + description: > + Returned when the model-generated function call arguments are done + streaming. + + Also emitted when a Response is interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.function_call_arguments.done + description: | + The event type, must be `response.function_call_arguments.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the function call item. + output_index: + type: integer + description: The index of the output item in the response. + call_id: + type: string + description: The ID of the function call. + arguments: + type: string + description: The final arguments as a JSON string. + required: + - event_id + - type + - response_id + - item_id + - output_index + - call_id + - arguments + x-oaiMeta: + name: response.function_call_arguments.done + group: realtime + example: | + { + "event_id": "event_5556", + "type": "response.function_call_arguments.done", + "response_id": "resp_002", + "item_id": "fc_001", + "output_index": 0, + "call_id": "call_001", + "arguments": "{\"location\": \"San Francisco\"}" + } + RealtimeServerEventResponseOutputItemAdded: + type: object + description: Returned when a new Item is created during Response generation. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.output_item.added + description: The event type, must be `response.output_item.added`. + response_id: + type: string + description: The ID of the Response to which the item belongs. + output_index: + type: integer + description: The index of the output item in the Response. + item: + $ref: "#/components/schemas/RealtimeConversationItem" + required: + - event_id + - type + - response_id + - output_index + - item + x-oaiMeta: + name: response.output_item.added + group: realtime + example: | + { + "event_id": "event_3334", + "type": "response.output_item.added", + "response_id": "resp_001", + "output_index": 0, + "item": { + "id": "msg_007", + "object": "realtime.item", + "type": "message", + "status": "in_progress", + "role": "assistant", + "content": [] + } + } + RealtimeServerEventResponseOutputItemDone: + type: object + description: > + Returned when an Item is done streaming. Also emitted when a Response + is + + interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.output_item.done + description: The event type, must be `response.output_item.done`. + response_id: + type: string + description: The ID of the Response to which the item belongs. + output_index: + type: integer + description: The index of the output item in the Response. 
+ item: + $ref: "#/components/schemas/RealtimeConversationItem" + required: + - event_id + - type + - response_id + - output_index + - item + x-oaiMeta: + name: response.output_item.done + group: realtime + example: | + { + "event_id": "event_3536", + "type": "response.output_item.done", + "response_id": "resp_001", + "output_index": 0, + "item": { + "id": "msg_007", + "object": "realtime.item", + "type": "message", + "status": "completed", + "role": "assistant", + "content": [ + { + "type": "text", + "text": "Sure, I can help with that." + } + ] + } + } + RealtimeServerEventResponseTextDelta: + type: object + description: Returned when the text value of a "text" content part is updated. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.text.delta + description: The event type, must be `response.text.delta`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + delta: + type: string + description: The text delta. + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - delta + x-oaiMeta: + name: response.text.delta + group: realtime + example: | + { + "event_id": "event_4142", + "type": "response.text.delta", + "response_id": "resp_001", + "item_id": "msg_007", + "output_index": 0, + "content_index": 0, + "delta": "Sure, I can h" + } + RealtimeServerEventResponseTextDone: + type: object + description: > + Returned when the text value of a "text" content part is done streaming. + Also + + emitted when a Response is interrupted, incomplete, or cancelled. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - response.text.done + description: The event type, must be `response.text.done`. + response_id: + type: string + description: The ID of the response. + item_id: + type: string + description: The ID of the item. + output_index: + type: integer + description: The index of the output item in the response. + content_index: + type: integer + description: The index of the content part in the item's content array. + text: + type: string + description: The final text content. + required: + - event_id + - type + - response_id + - item_id + - output_index + - content_index + - text + x-oaiMeta: + name: response.text.done + group: realtime + example: | + { + "event_id": "event_4344", + "type": "response.text.done", + "response_id": "resp_001", + "item_id": "msg_007", + "output_index": 0, + "content_index": 0, + "text": "Sure, I can help with that." + } + RealtimeServerEventSessionCreated: + type: object + description: > + Returned when a Session is created. Emitted automatically when a new + + connection is established as the first server event. This event will + contain + + the default Session configuration. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - session.created + description: The event type, must be `session.created`. 
+ session: + $ref: "#/components/schemas/RealtimeSession" + required: + - event_id + - type + - session + x-oaiMeta: + name: session.created + group: realtime + example: | + { + "event_id": "event_1234", + "type": "session.created", + "session": { + "id": "sess_001", + "object": "realtime.session", + "model": "gpt-4o-realtime-preview-2024-10-01", + "modalities": ["text", "audio"], + "instructions": "...model instructions here...", + "voice": "sage", + "input_audio_format": "pcm16", + "output_audio_format": "pcm16", + "input_audio_transcription": null, + "turn_detection": { + "type": "server_vad", + "threshold": 0.5, + "prefix_padding_ms": 300, + "silence_duration_ms": 200 + }, + "tools": [], + "tool_choice": "auto", + "temperature": 0.8, + "max_response_output_tokens": "inf" + } + } + RealtimeServerEventSessionUpdated: + type: object + description: > + Returned when a session is updated with a `session.update` event, + unless + + there is an error. + properties: + event_id: + type: string + description: The unique ID of the server event. + type: + type: string + enum: + - session.updated + description: The event type, must be `session.updated`. + session: + $ref: "#/components/schemas/RealtimeSession" + required: + - event_id + - type + - session + x-oaiMeta: + name: session.updated + group: realtime + example: | + { + "event_id": "event_5678", + "type": "session.updated", + "session": { + "id": "sess_001", + "object": "realtime.session", + "model": "gpt-4o-realtime-preview-2024-10-01", + "modalities": ["text"], + "instructions": "New instructions", + "voice": "sage", + "input_audio_format": "pcm16", + "output_audio_format": "pcm16", + "input_audio_transcription": { + "model": "whisper-1" + }, + "turn_detection": null, + "tools": [], + "tool_choice": "none", + "temperature": 0.7, + "max_response_output_tokens": 200 + } + } + RealtimeSession: + type: object + description: Realtime session object configuration. + properties: + modalities: + description: | + The set of modalities the model can respond with. To disable audio, + set this to ["text"]. + items: + type: string + enum: + - text + - audio + instructions: + type: string + description: > + The default system instructions (i.e. system message) prepended to + model + + calls. This field allows the client to guide the model on desired + + responses. The model can be instructed on response content and + format, + + (e.g. "be extremely succinct", "act friendly", "here are examples of + good + + responses") and on audio behavior (e.g. "talk quickly", "inject + emotion + + into your voice", "laugh frequently"). The instructions are not + guaranteed + + to be followed by the model, but they provide guidance to the model + on the + + desired behavior. + + + Note that the server sets default instructions which will be used if + this + + field is not set and are visible in the `session.created` event at + the + + start of the session. + voice: + type: string + enum: + - alloy + - ash + - ballad + - coral + - echo + - sage + - shimmer + - verse + description: > + The voice the model uses to respond. Current voice options are + `ash`, + + `ballad`, `coral`, `sage`, and `verse`. + + + Also supported but not recommended are `alloy`, `echo`, and + `shimmer`. + + These older voices are less expressive. + + + Voice cannot be changed during the session once the model has + + responded with audio at least once. + input_audio_format: + type: string + description: > + The format of input audio. Options are `pcm16`, `g711_ulaw`, or + `g711_alaw`. 
+ output_audio_format: + type: string + description: > + The format of output audio. Options are `pcm16`, `g711_ulaw`, or + `g711_alaw`. + input_audio_transcription: + type: object + description: > + Configuration for input audio transcription, defaults to off and can + be + + set to `null` to turn off once on. Input audio transcription is not + native + + to the model, since the model consumes audio directly. Transcription + runs + + asynchronously through Whisper and should be treated as rough + guidance + + rather than the representation understood by the model. + properties: + model: + type: string + description: > + The model to use for transcription, `whisper-1` is the only + currently + + supported model. + turn_detection: + type: object + description: > + Configuration for turn detection. Can be set to `null` to turn off. + Server + + VAD means that the model will detect the start and end of speech + based on + + audio volume and respond at the end of user speech. + properties: + type: + type: string + description: > + Type of turn detection, only `server_vad` is currently supported. + threshold: + type: number + description: > + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + A + + higher threshold will require louder audio to activate the + model, and + + thus might perform better in noisy environments. + prefix_padding_ms: + type: integer + description: | + Amount of audio to include before the VAD detected speech (in + milliseconds). Defaults to 300ms. + silence_duration_ms: + type: integer + description: > + Duration of silence to detect speech stop (in milliseconds). + Defaults + + to 500ms. With shorter values the model will respond more + quickly, + + but may jump in on short pauses from the user. + tools: + type: array + description: Tools (functions) available to the model. + items: type: object properties: - object: - enum: - - organization.project.api_key - type: string - description: 'The object type, which is always `organization.project.api_key`' - redacted_value: - type: string - description: The redacted value of the API key - name: - type: string - description: The name of the API key - created_at: - type: integer - description: The Unix timestamp (in seconds) of when the API key was created - id: - type: string - description: 'The identifier, which can be referenced in API endpoints' - owner: - type: object - properties: - type: - enum: - - user - - service_account - type: string - description: '`user` or `service_account`' - user: - $ref: '#/components/schemas/ProjectUser' - service_account: - $ref: '#/components/schemas/ProjectServiceAccount' - description: Represents an individual API key in a project. - x-oaiMeta: - name: The project API key object - example: "{\n \"object\": \"organization.project.api_key\",\n \"redacted_value\": \"sk-abc...def\",\n \"name\": \"My API Key\",\n \"created_at\": 1711471533,\n \"id\": \"key_abc\",\n \"owner\": {\n \"type\": \"user\",\n \"user\": {\n \"object\": \"organization.project.user\",\n \"id\": \"user_abc\",\n \"name\": \"First Last\",\n \"email\": \"user@example.com\",\n \"role\": \"owner\",\n \"added_at\": 1711471533\n }\n }\n}\n" - ProjectApiKeyListResponse: - required: - - object - - data - - first_id - - last_id - - has_more + type: + type: string + enum: + - function + description: The type of the tool, i.e. `function`. + name: + type: string + description: The name of the function. 
+ description: + type: string + description: > + The description of the function, including guidance on when + and how + + to call it, and guidance about what to tell the user when + calling + + (if anything). + parameters: + type: object + description: Parameters of the function in JSON Schema. + tool_choice: + type: string + description: > + How the model chooses tools. Options are `auto`, `none`, `required`, + or + + specify a function. + temperature: + type: number + description: > + Sampling temperature for the model, limited to [0.6, 1.2]. Defaults + to 0.8. + max_response_output_tokens: + oneOf: + - type: integer + - type: string + enum: + - inf + description: | + Maximum number of output tokens for a single assistant response, + inclusive of tool calls. Provide an integer between 1 and 4096 to + limit output tokens, or `inf` for the maximum available tokens for a + given model. Defaults to `inf`. + ResponseFormatJsonObject: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_object`" + enum: + - json_object + required: + - type + ResponseFormatJsonSchema: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `json_schema`" + enum: + - json_schema + json_schema: + type: object + properties: + description: + type: string + description: A description of what the response format is for, used by the model + to determine how to respond in the format. + name: + type: string + description: The name of the response format. Must be a-z, A-Z, 0-9, or contain + underscores and dashes, with a maximum length of 64. + schema: + $ref: "#/components/schemas/ResponseFormatJsonSchemaSchema" + strict: + type: boolean + nullable: true + default: false + description: Whether to enable strict schema adherence when generating the + output. If set to true, the model will always follow the exact + schema defined in the `schema` field. Only a subset of JSON + Schema is supported when `strict` is `true`. To learn more, read + the [Structured Outputs guide](/docs/guides/structured-outputs). + required: + - type + - name + required: + - type + - json_schema + ResponseFormatJsonSchemaSchema: + type: object + description: The schema for the response format, described as a JSON Schema object. + additionalProperties: true + ResponseFormatText: + type: object + properties: + type: + type: string + description: "The type of response format being defined: `text`" + enum: + - text + required: + - type + RunCompletionUsage: + type: object + description: Usage statistics related to the run. This value will be `null` if + the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + RunObject: + type: object + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run`. 
+ type: string + enum: + - thread.run + created_at: + description: The Unix timestamp (in seconds) for when the run was created. + type: integer + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was + executed on as a part of this run. + type: string + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for + execution of this run. + type: string + status: + description: The status of the run, which can be either `queued`, `in_progress`, + `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + `incomplete`, or `expired`. + type: string + enum: + - queued + - in_progress + - requires_action + - cancelling + - cancelled + - failed + - completed + - incomplete + - expired + required_action: + type: object + description: Details on the action required to continue the run. Will be `null` + if no action is required. + nullable: true + properties: + type: + description: For now, this is always `submit_tool_outputs`. + type: string + enum: + - submit_tool_outputs + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs + last_error: + type: object + description: The last error associated with this run. Will be `null` if there + are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: + - server_error + - rate_limit_exceeded + - invalid_prompt + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + nullable: true + started_at: + description: The Unix timestamp (in seconds) for when the run was started. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run was completed. + type: integer + nullable: true + incomplete_details: + description: Details on why the run is incomplete. Will be `null` if the run is + not incomplete. + type: object + nullable: true + properties: + reason: + description: The reason why the run is incomplete. This will point to which + specific token limit was reached over the course of the run. + type: string + enum: + - max_completion_tokens + - max_prompt_tokens + model: + description: The model that the [assistant](/docs/api-reference/assistants) used + for this run. + type: string + instructions: + description: The instructions that the + [assistant](/docs/api-reference/assistants) used for this run. + type: string + tools: + description: The list of tools that the + [assistant](/docs/api-reference/assistants) used for this run. 
+ default: [] + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to + 1. + type: number + nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults + to 1. + type: number + nullable: true + max_prompt_tokens: + type: integer + nullable: true + description: > + The maximum number of prompt tokens specified to have been used over + the course of the run. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: > + The maximum number of completion tokens specified to have been used + over the course of the run. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + parallel_tool_calls: + $ref: "#/components/schemas/ParallelToolCalls" + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - metadata + - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - parallel_tool_calls + - response_format + x-oaiMeta: + name: The run object + beta: true + example: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4o", + "instructions": null, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + "metadata": {}, + "incomplete_details": null, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto", + "parallel_tool_calls": true + } + RunStepCompletionUsage: + type: object + description: Usage statistics related to the run step. This value will be `null` + while the run step's status is `in_progress`. + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). 
+ required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + RunStepDeltaObject: + type: object + title: Run step delta object + description: > + Represents a run step delta i.e. any changed fields on a run step during + streaming. + properties: + id: + description: The identifier of the run step, which can be referenced in API + endpoints. + type: string + object: + description: The object type, which is always `thread.run.step.delta`. + type: string + enum: + - thread.run.step.delta + delta: + description: The delta containing the fields that have changed on the run step. + type: object + properties: + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The run step delta object + beta: true + example: | + { + "id": "step_123", + "object": "thread.run.step.delta", + "delta": { + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: + - message_creation + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - type + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be + `code_interpreter` for this type of tool call. + enum: + - code_interpreter + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter + can output one or more items, including text (`logs`) or images + (`image`). Each of these are represented by a different object + type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObjec\ + t" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObje\ + ct" + x-oaiExpandable: true + required: + - index + - type + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `image`. + type: string + enum: + - image + image: + type: object + properties: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. 
+ type: string + required: + - index + - type + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `logs`. + type: string + enum: + - logs + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - index + - type + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for + this type of tool call. + enum: + - file_search + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - index + - type + - file_search + RunStepDeltaStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for + this type of tool call. + enum: + - function + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have + not been [submitted](/docs/api-reference/runs/submitToolOutputs) + yet. + nullable: true + required: + - index + - type + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: + - tool_calls + tool_calls: + type: array + description: > + An array of tool calls the run step was involved in. These can be + associated with one of three types of tools: `code_interpreter`, + `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + RunStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: + - message_creation + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + required: + - type + - message_creation + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. 
This is always going to be + `code_interpreter` for this type of tool call. + enum: + - code_interpreter + code_interpreter: + type: object + description: The Code Interpreter tool call definition. + required: + - input + - outputs + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter + can output one or more items, including text (`logs`) or images + (`image`). Each of these are represented by a different object + type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - id + - type + - code_interpreter + RunStepDetailsToolCallsCodeOutputImageObject: + title: Code Interpreter image output + type: object + properties: + type: + description: Always `image`. + type: string + enum: + - image + image: + type: object + properties: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. + type: string + required: + - file_id + required: + - type + - image + RunStepDetailsToolCallsCodeOutputLogsObject: + title: Code Interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + type: + description: Always `logs`. + type: string + enum: + - logs + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - type + - logs + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for + this type of tool call. + enum: + - file_search + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + properties: + ranking_options: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchRankingOptionsObje\ + ct" + results: + type: array + description: The results of the file search. + items: + $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchResultObject" + required: + - id + - type + - file_search + RunStepDetailsToolCallsFileSearchRankingOptionsObject: + title: File search tool call ranking options + type: object + description: The ranking options for the file search. + properties: + ranker: + type: string + description: The ranker used for the file search. + enum: + - default_2024_08_21 + score_threshold: + type: number + description: The score threshold for the file search. All values must be a + floating point number between 0 and 1. + minimum: 0 + maximum: 1 + required: + - ranker + - score_threshold + RunStepDetailsToolCallsFileSearchResultObject: + title: File search tool call result + type: object + description: A result instance of the file search. + x-oaiTypeLabel: map + properties: + file_id: + type: string + description: The ID of the file that result was found in. + file_name: + type: string + description: The name of the file that result was found in. + score: + type: number + description: The score of the result. All values must be a floating point number + between 0 and 1. + minimum: 0 + maximum: 1 + content: + type: array + description: The content of the result that was found. 
The content is only + included if requested via the include query parameter. + items: type: object properties: - object: - enum: - - list - type: string - data: - type: array - items: - $ref: '#/components/schemas/ProjectApiKey' - first_id: - type: string - last_id: - type: string - has_more: - type: boolean - ProjectApiKeyDeleteResponse: - required: - - object - - id - - deleted + type: + type: string + description: The type of the content. + enum: + - text + text: + type: string + description: The text content of the file. + required: + - file_id + - file_name + - score + RunStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for + this type of tool call. + enum: + - function + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have + not been [submitted](/docs/api-reference/runs/submitToolOutputs) + yet. + nullable: true + required: + - name + - arguments + - output + required: + - id + - type + - function + RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: + - tool_calls + tool_calls: + type: array + description: > + An array of tool calls the run step was involved in. These can be + associated with one of three types of tools: `code_interpreter`, + `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls + RunStepObject: + type: object + title: Run steps + description: | + Represents a step in execution of a run. + properties: + id: + description: The identifier of the run step, which can be referenced in API + endpoints. + type: string + object: + description: The object type, which is always `thread.run.step`. + type: string + enum: + - thread.run.step + created_at: + description: The Unix timestamp (in seconds) for when the run step was created. + type: integer + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) + associated with the run step. + type: string + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. + type: string + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is + a part of. + type: string + type: + description: The type of run step, which can be either `message_creation` or + `tool_calls`. + type: string + enum: + - message_creation + - tool_calls + status: + description: The status of the run step, which can be either `in_progress`, + `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: + - in_progress + - cancelled + - failed + - completed + - expired + step_details: + type: object + description: The details of the run step. 
+ oneOf: + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" + x-oaiExpandable: true + last_error: + type: object + description: The last error associated with this run step. Will be `null` if + there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: + - server_error + - rate_limit_exceeded + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A + step is considered expired if the parent run is expired. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" + required: + - id + - object + - created_at + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage + x-oaiMeta: + name: The run step object + beta: true + example: | + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + RunStepStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: + - thread.run.step.created + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.in_progress + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.delta + data: + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run + step](/docs/api-reference/run-steps/step-object) are being streamed. 
+ x-oaiMeta: + dataDescription: "`data` is a [run step + delta](/docs/api-reference/assistants-streaming/run-step-delta-ob\ + ject)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.completed + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.failed + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + fails. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.cancelled + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.step.expired + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/run-steps/step-object) + expires. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/run-steps/step-object)" + RunStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: + - thread.run.created + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a new [run](/docs/api-reference/runs/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.queued + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a + `queued` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.in_progress + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to an + `in_progress` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.requires_action + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a + `requires_action` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.completed + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is completed. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.incomplete + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with + status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.failed + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.cancelling + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a + `cancelling` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.cancelled + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: + - thread.run.expired + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + RunToolCallObject: + type: object + description: Tool call objects + properties: + id: + type: string + description: The ID of the tool call. This ID must be referenced when you submit + the tool outputs in using the [Submit tool outputs to + run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: + type: string + description: The type of tool call the output is required for. For now, this is + always `function`. + enum: + - function + function: + type: object + description: The function definition. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + required: + - id + - type + - function + StaticChunkingStrategy: + type: object + additionalProperties: false + properties: + max_chunk_size_tokens: + type: integer + minimum: 100 + maximum: 4096 + description: The maximum number of tokens in each chunk. The default value is + `800`. The minimum value is `100` and the maximum value is `4096`. + chunk_overlap_tokens: + type: integer + description: > + The number of tokens that overlap between chunks. The default value + is `400`. + + + Note that the overlap must not exceed half of + `max_chunk_size_tokens`. + required: + - max_chunk_size_tokens + - chunk_overlap_tokens + StaticChunkingStrategyRequestParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. 
+ enum: + - static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + StaticChunkingStrategyResponseParam: + type: object + title: Static Chunking Strategy + additionalProperties: false + properties: + type: + type: string + description: Always `static`. + enum: + - static + static: + $ref: "#/components/schemas/StaticChunkingStrategy" + required: + - type + - static + SubmitToolOutputsRunRequest: + type: object + additionalProperties: false + properties: + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: type: object properties: - object: - enum: - - organization.project.api_key.deleted - type: string - id: - type: string - deleted: - type: boolean - securitySchemes: - ApiKeyAuth: - type: http - scheme: bearer + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the + run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. + stream: + type: boolean + nullable: true + description: > + If `true`, returns a stream of events that happen during the Run as + server-sent events, terminating when the Run enters a terminal state + with a `data: [DONE]` message. + required: + - tool_outputs + ThreadObject: + type: object + title: Thread + description: Represents a thread that contains + [messages](/docs/api-reference/messages). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread`. + type: string + enum: + - thread + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. + type: integer + tool_resources: + type: object + description: > + A set of resources that are made available to the assistant's tools + in this thread. The resources are specific to the type of tool. For + example, the `code_interpreter` tool requires a list of file IDs, + while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: > + A list of [file](/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a + maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: > + The [vector store](/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector + store attached to the thread. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - created_at + - tool_resources + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } + ThreadStreamEvent: + oneOf: + - type: object + properties:
+ event: + type: string + enum: + - thread.created + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is + created. + x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + TranscriptionSegment: + type: object + properties: + id: + type: integer + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: + type: string + description: Text content of the segment. + tokens: + type: array + items: + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, + consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, + consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher + than 1.0 and the `avg_logprob` is below -1, consider this segment + silent. + required: + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob + TranscriptionWord: + type: object + properties: + word: + type: string + description: The text content of the word. + start: + type: number + format: float + description: Start time of the word in seconds. + end: + type: number + format: float + description: End time of the word in seconds. + required: + - word + - start + - end + TruncationObject: + type: object + title: Thread Truncation Controls + description: Controls for how a thread will be truncated prior to the run. Use + this to control the initial context window of the run. + properties: + type: + type: string + description: The truncation strategy to use for the thread. The default is + `auto`. If set to `last_messages`, the thread will be truncated to + the n most recent messages in the thread. When set to `auto`, + messages in the middle of the thread will be dropped to fit the + context length of the model, `max_prompt_tokens`. + enum: + - auto + - last_messages + last_messages: + type: integer + description: The number of most recent messages from the thread when + constructing the context for the run. + minimum: 1 + nullable: true + required: + - type + UpdateVectorStoreRequest: + type: object + additionalProperties: false + properties: + name: + description: The name of the vector store. + type: string + nullable: true + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + Upload: + type: object + title: Upload + description: | + The Upload object can accept byte chunks in the form of Parts.
+ properties: + id: + type: string + description: The Upload unique identifier, which can be referenced in API + endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload was created. + filename: + type: string + description: The name of the file to be uploaded. + bytes: + type: integer + description: The intended number of bytes to be uploaded. + purpose: + type: string + description: The intended purpose of the file. [Please refer + here](/docs/api-reference/files/object#files/object-purpose) for + acceptable values. + status: + type: string + description: The status of the Upload. + enum: + - pending + - completed + - cancelled + - expired + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the Upload will expire. + object: + type: string + description: The object type, which is always "upload". + enum: + - upload + file: + $ref: "#/components/schemas/OpenAIFile" + nullable: true + description: The ready File object after the Upload is completed. + required: + - bytes + - created_at + - expires_at + - filename + - id + - purpose + - status + x-oaiMeta: + name: The upload object + example: | + { + "id": "upload_abc123", + "object": "upload", + "bytes": 2147483648, + "created_at": 1719184911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune", + "status": "completed", + "expires_at": 1719127296, + "file": { + "id": "file-xyz321", + "object": "file", + "bytes": 2147483648, + "created_at": 1719186911, + "filename": "training_examples.jsonl", + "purpose": "fine-tune" + } + } + UploadPart: + type: object + title: UploadPart + description: > + The upload Part represents a chunk of bytes we can add to an Upload + object. + properties: + id: + type: string + description: The upload Part unique identifier, which can be referenced in API + endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the Part was created. + upload_id: + type: string + description: The ID of the Upload object that this Part was added to. + object: + type: string + description: The object type, which is always `upload.part`. + enum: + - upload.part + required: + - created_at + - id + - object + - upload_id + x-oaiMeta: + name: The upload part object + example: | + { + "id": "part_def456", + "object": "upload.part", + "created_at": 1719186911, + "upload_id": "upload_abc123" + } + UsageAudioSpeechesResult: + type: object + description: The aggregated audio speeches usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.audio_speeches.result + characters: + type: integer + description: The number of characters processed. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + description: When `group_by=model`, this field provides the model name of the + grouped usage result.
+ required: + - object + - characters + - num_model_requests + x-oaiMeta: + name: Audio speeches usage object + example: | + { + "object": "organization.usage.audio_speeches.result", + "characters": 45, + "num_model_requests": 1, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "tts-1" + } + UsageAudioTranscriptionsResult: + type: object + description: The aggregated audio transcriptions usage details of the specific + time bucket. + properties: + object: + type: string + enum: + - organization.usage.audio_transcriptions.result + seconds: + type: integer + description: The number of seconds processed. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + description: When `group_by=model`, this field provides the model name of the + grouped usage result. + required: + - object + - seconds + - num_model_requests + x-oaiMeta: + name: Audio transcriptions usage object + example: | + { + "object": "organization.usage.audio_transcriptions.result", + "seconds": 10, + "num_model_requests": 1, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "whisper-1" + } + UsageCodeInterpreterSessionsResult: + type: object + description: The aggregated code interpreter sessions usage details of the + specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.code_interpreter_sessions.result + sessions: + type: integer + description: The number of code interpreter sessions. + project_id: + type: string + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + required: + - object + - sessions + x-oaiMeta: + name: Code interpreter sessions usage object + example: | + { + "object": "organization.usage.code_interpreter_sessions.result", + "sessions": 1, + "project_id": "proj_abc" + } + UsageCompletionsResult: + type: object + description: The aggregated completions usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.completions.result + input_tokens: + type: integer + description: The number of input tokens used. + input_cached_tokens: + type: integer + description: The number of input tokens that have been cached from previous + requests. + output_tokens: + type: integer + description: The number of output tokens used. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + description: When `group_by=model`, this field provides the model name of the + grouped usage result.
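+ # The project_id, user_id, api_key_id, model, and batch fields are populated when the corresponding `group_by` value is requested, per the field descriptions above.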
+ batch: + type: boolean + description: When `group_by=batch`, this field tells whether the grouped usage + result is batch or not. + required: + - object + - input_tokens + - output_tokens + - num_model_requests + x-oaiMeta: + name: Completions usage object + example: | + { + "object": "organization.usage.completions.result", + "input_tokens": 5000, + "output_tokens": 1000, + "input_cached_tokens": 4000, + "num_model_requests": 5, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "gpt-4o-mini-2024-07-18", + "batch": false + } + UsageEmbeddingsResult: + type: object + description: The aggregated embeddings usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.embeddings.result + input_tokens: + type: integer + description: The number of input tokens used. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + description: When `group_by=model`, this field provides the model name of the + grouped usage result. + required: + - object + - input_tokens + - num_model_requests + x-oaiMeta: + name: Embeddings usage object + example: | + { + "object": "organization.usage.embeddings.result", + "input_tokens": 20, + "num_model_requests": 2, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "text-embedding-ada-002-v2" + } + UsageImagesResult: + type: object + description: The aggregated images usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.images.result + images: + type: integer + description: The number of images processed. + num_model_requests: + type: integer + description: The count of requests made to the model. + source: + type: string + description: When `group_by=source`, this field provides the source of the + grouped usage result, possible values are `image.generation`, + `image.edit`, `image.variation`. + size: + type: string + description: When `group_by=size`, this field provides the image size of the + grouped usage result. + project_id: + type: string + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + description: When `group_by=model`, this field provides the model name of the + grouped usage result.
+ required: + - object + - images + - num_model_requests + x-oaiMeta: + name: Images usage object + example: | + { + "object": "organization.usage.images.result", + "images": 2, + "num_model_requests": 2, + "size": "1024x1024", + "source": "image.generation", + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "dall-e-3" + } + UsageModerationsResult: + type: object + description: The aggregated moderations usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.moderations.result + input_tokens: + type: integer + description: The number of input tokens used. + num_model_requests: + type: integer + description: The count of requests made to the model. + project_id: + type: string + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + user_id: + type: string + description: When `group_by=user_id`, this field provides the user ID of the + grouped usage result. + api_key_id: + type: string + description: When `group_by=api_key_id`, this field provides the API key ID of + the grouped usage result. + model: + type: string + description: When `group_by=model`, this field provides the model name of the + grouped usage result. + required: + - object + - input_tokens + - num_model_requests + x-oaiMeta: + name: Moderations usage object + example: | + { + "object": "organization.usage.moderations.result", + "input_tokens": 20, + "num_model_requests": 2, + "project_id": "proj_abc", + "user_id": "user-abc", + "api_key_id": "key_abc", + "model": "text-moderation" + } + UsageResponse: + type: object + properties: + object: + type: string + enum: + - page + data: + type: array + items: + $ref: "#/components/schemas/UsageTimeBucket" + has_more: + type: boolean + next_page: + type: string + required: + - object + - data + - has_more + - next_page + UsageTimeBucket: + type: object + properties: + object: + type: string + enum: + - bucket + start_time: + type: integer + end_time: + type: integer + result: + type: array + items: + oneOf: + - $ref: "#/components/schemas/UsageCompletionsResult" + - $ref: "#/components/schemas/UsageEmbeddingsResult" + - $ref: "#/components/schemas/UsageModerationsResult" + - $ref: "#/components/schemas/UsageImagesResult" + - $ref: "#/components/schemas/UsageAudioSpeechesResult" + - $ref: "#/components/schemas/UsageAudioTranscriptionsResult" + - $ref: "#/components/schemas/UsageVectorStoresResult" + - $ref: "#/components/schemas/UsageCodeInterpreterSessionsResult" + - $ref: "#/components/schemas/CostsResult" + required: + - object + - start_time + - end_time + - result + UsageVectorStoresResult: + type: object + description: The aggregated vector stores usage details of the specific time bucket. + properties: + object: + type: string + enum: + - organization.usage.vector_stores.result + usage_bytes: + type: integer + description: The vector stores usage in bytes. + project_id: + type: string + description: When `group_by=project_id`, this field provides the project ID of + the grouped usage result. + required: + - object + - usage_bytes + x-oaiMeta: + name: Vector stores usage object + example: | + { + "object": "organization.usage.vector_stores.result", + "usage_bytes": 1024, + "project_id": "proj_abc" + } + User: + type: object + description: Represents an individual `user` within an organization.
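UsageResponse and UsageTimeBucket above define a simple `page`-of-`bucket` pagination shape: each page carries `data`, `has_more`, and a `next_page` cursor. A hedged sketch of walking it for completions usage follows; the endpoint path and the `page` cursor parameter name are assumptions, only the response fields come from the schemas above.

```python
# Hypothetical sketch: sum completions token usage across all pages and buckets.
import os
import requests

API = "https://api.openai.com/v1/organization/usage/completions"  # assumed path
HEADERS = {"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"}

params = {"start_time": 1730419200, "limit": 7}
total_input = total_output = 0

while True:
    page = requests.get(API, headers=HEADERS, params=params).json()
    for bucket in page["data"]:            # UsageTimeBucket objects
        for result in bucket["result"]:    # UsageCompletionsResult objects
            total_input += result["input_tokens"]
            total_output += result["output_tokens"]
    if not page["has_more"]:
        break
    params["page"] = page["next_page"]     # assumed cursor parameter name

print("input tokens:", total_input, "output tokens:", total_output)
```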
+ properties: + object: + type: string + enum: + - organization.user + description: The object type, which is always `organization.user` + id: + type: string + description: The identifier, which can be referenced in API endpoints + name: + type: string + description: The name of the user + email: + type: string + description: The email address of the user + role: + type: string + enum: + - owner + - reader + description: "`owner` or `reader`" + added_at: + type: integer + description: The Unix timestamp (in seconds) of when the user was added. + required: + - object + - id + - name + - email + - role + - added_at + x-oaiMeta: + name: The user object + example: | + { + "object": "organization.user", + "id": "user_abc", + "name": "First Last", + "email": "user@example.com", + "role": "owner", + "added_at": 1711471533 + } + UserDeleteResponse: + type: object + properties: + object: + type: string + enum: + - organization.user.deleted + id: + type: string + deleted: + type: boolean + required: + - object + - id + - deleted + UserListResponse: + type: object + properties: + object: + type: string + enum: + - list + data: + type: array + items: + $ref: "#/components/schemas/User" + first_id: + type: string + last_id: + type: string + has_more: + type: boolean + required: + - object + - data + - first_id + - last_id + - has_more + UserRoleUpdateRequest: + type: object + properties: + role: + type: string + enum: + - owner + - reader + description: "`owner` or `reader`" + required: + - role + VectorStoreExpirationAfter: + type: object + title: Vector store expiration policy + description: The expiration policy for a vector store. + properties: + anchor: + description: "Anchor timestamp after which the expiration policy applies. + Supported anchors: `last_active_at`." + type: string + enum: + - last_active_at + days: + description: The number of days after the anchor time that the vector store will + expire. + type: integer + minimum: 1 + maximum: 365 + required: + - anchor + - days + VectorStoreFileBatchObject: + type: object + title: Vector store file batch + description: A batch of files attached to a vector store. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store.files_batch`. + type: string + enum: + - vector_store.files_batch + created_at: + description: The Unix timestamp (in seconds) for when the vector store files + batch was created. + type: integer + vector_store_id: + description: The ID of the [vector + store](/docs/api-reference/vector-stores/object) that the + [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store files batch, which can be either + `in_progress`, `completed`, `cancelled` or `failed`. + type: string + enum: + - in_progress + - completed + - cancelled + - failed + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files.
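VectorStoreExpirationAfter above is the shape of the `expires_after` payload accepted when creating or updating a vector store: an `anchor` (currently only `last_active_at`) plus a number of `days`. A minimal sketch, assuming the `/v1/vector_stores` path and the `OpenAI-Beta: assistants=v2` header used by the beta Assistants surface:

```python
# Hypothetical sketch: create a vector store that expires 7 days after last use.
import os
import requests

resp = requests.post(
    "https://api.openai.com/v1/vector_stores",  # assumed path
    headers={
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
        "OpenAI-Beta": "assistants=v2",  # assumed beta header
    },
    json={
        "name": "support_docs",
        # VectorStoreExpirationAfter: anchor + days, per the schema above.
        "expires_after": {"anchor": "last_active_at", "days": 7},
    },
).json()
print(resp["id"], resp.get("expires_at"))
```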
+ type: integer + required: + - in_progress + - completed + - cancelled + - failed + - total + required: + - id + - object + - created_at + - vector_store_id + - status + - file_counts + x-oaiMeta: + name: The vector store files batch object + beta: true + example: | + { + "id": "vsfb_123", + "object": "vector_store.files_batch", + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "failed": 0, + "cancelled": 0, + "total": 100 + } + } + VectorStoreFileObject: + type: object + title: Vector store files + description: A file attached to a vector store. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store.file`. + type: string + enum: + - vector_store.file + usage_bytes: + description: The total vector store usage in bytes. Note that this may be + different from the original file size. + type: integer + created_at: + description: The Unix timestamp (in seconds) for when the vector store file was + created. + type: integer + vector_store_id: + description: The ID of the [vector + store](/docs/api-reference/vector-stores/object) that the + [File](/docs/api-reference/files) is attached to. + type: string + status: + description: The status of the vector store file, which can be either + `in_progress`, `completed`, `cancelled`, or `failed`. The status + `completed` indicates that the vector store file is ready for use. + type: string + enum: + - in_progress + - completed + - cancelled + - failed + last_error: + type: object + description: The last error associated with this vector store file. Will be + `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error`, `unsupported_file`, or `invalid_file`. + enum: + - server_error + - unsupported_file + - invalid_file + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + chunking_strategy: + type: object + description: The strategy used to chunk the file. + oneOf: + - $ref: "#/components/schemas/StaticChunkingStrategyResponseParam" + - $ref: "#/components/schemas/OtherChunkingStrategyResponseParam" + x-oaiExpandable: true + required: + - id + - object + - usage_bytes + - created_at + - vector_store_id + - status + - last_error + x-oaiMeta: + name: The vector store file object + beta: true + example: | + { + "id": "file-abc123", + "object": "vector_store.file", + "usage_bytes": 1234, + "created_at": 1698107661, + "vector_store_id": "vs_abc123", + "status": "completed", + "last_error": null, + "chunking_strategy": { + "type": "static", + "static": { + "max_chunk_size_tokens": 800, + "chunk_overlap_tokens": 400 + } + } + } + VectorStoreObject: + type: object + title: Vector store + description: A vector store is a collection of processed files that can be used by + the `file_search` tool. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `vector_store`. + type: string + enum: + - vector_store + created_at: + description: The Unix timestamp (in seconds) for when the vector store was + created. + type: integer + name: + description: The name of the vector store. + type: string + usage_bytes: + description: The total number of bytes used by the files in the vector store.
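Because a VectorStoreFileObject starts out `in_progress` and only reports `last_error` once processing finishes, callers typically poll it before relying on `file_search`. A sketch of that loop; the retrieve path `/vector_stores/{vector_store_id}/files/{file_id}` and the beta header are assumptions based on the vector store file endpoints listed later in this file, and the IDs are the placeholder values from the examples above.

```python
# Hypothetical sketch: poll a vector store file until it leaves `in_progress`.
import os
import time
import requests

API = "https://api.openai.com/v1"
HEADERS = {
    "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
    "OpenAI-Beta": "assistants=v2",  # assumed beta header
}

def wait_for_file(vector_store_id: str, file_id: str, interval: float = 2.0) -> dict:
    """Return the VectorStoreFileObject once its status is terminal."""
    while True:
        vs_file = requests.get(
            f"{API}/vector_stores/{vector_store_id}/files/{file_id}",
            headers=HEADERS,
        ).json()
        if vs_file["status"] != "in_progress":
            return vs_file
        time.sleep(interval)

vs_file = wait_for_file("vs_abc123", "file-abc123")
if vs_file["status"] == "failed" and vs_file["last_error"]:
    print(vs_file["last_error"]["code"], vs_file["last_error"]["message"])
else:
    print(vs_file["status"], vs_file["usage_bytes"])
```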
+ type: integer + file_counts: + type: object + properties: + in_progress: + description: The number of files that are currently being processed. + type: integer + completed: + description: The number of files that have been successfully processed. + type: integer + failed: + description: The number of files that have failed to process. + type: integer + cancelled: + description: The number of files that were cancelled. + type: integer + total: + description: The total number of files. + type: integer + required: + - in_progress + - completed + - failed + - cancelled + - total + status: + description: The status of the vector store, which can be either `expired`, + `in_progress`, or `completed`. A status of `completed` indicates + that the vector store is ready for use. + type: string + enum: + - expired + - in_progress + - completed + expires_after: + $ref: "#/components/schemas/VectorStoreExpirationAfter" + expires_at: + description: The Unix timestamp (in seconds) for when the vector store will + expire. + type: integer + nullable: true + last_active_at: + description: The Unix timestamp (in seconds) for when the vector store was last + active. + type: integer + nullable: true + metadata: + description: > + Set of 16 key-value pairs that can be attached to an object. This + can be useful for storing additional information about the object in + a structured format. Keys can be a maximum of 64 characters long and + values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - usage_bytes + - created_at + - status + - last_active_at + - name + - file_counts + - metadata + x-oaiMeta: + name: The vector store object + beta: true + example: | + { + "id": "vs_123", + "object": "vector_store", + "created_at": 1698107661, + "usage_bytes": 123456, + "last_active_at": 1698107661, + "name": "my_vector_store", + "status": "completed", + "file_counts": { + "in_progress": 0, + "completed": 100, + "cancelled": 0, + "failed": 0, + "total": 100 + }, + "metadata": {}, + "last_used_at": 1698107661 + } + securitySchemes: + ApiKeyAuth: + type: http + scheme: bearer security: - - ApiKeyAuth: [ ] -tags: - - name: Assistants - description: Build Assistants that can call models and use tools. - - name: Audio - description: Turn audio into text or text into audio. - - name: Chat - description: 'Given a list of messages comprising a conversation, the model will return a response.' - - name: Completions - description: 'Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.' - - name: Embeddings - description: Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. - - name: Fine-tuning - description: Manage fine-tuning jobs to tailor a model to your specific training data. - - name: Batch - description: Create large batches of API requests to run asynchronously. - - name: Files - description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning. - - name: Uploads - description: Use Uploads to upload large files in multiple parts. - - name: Images - description: 'Given a prompt and/or an input image, the model will generate a new image.' - - name: Models - description: List and describe the various models available in the API. - - name: Moderations - description: 'Given a input text, outputs if the model classifies it as potentially harmful.' 
- - name: Audit Logs - description: List user actions and configuration changes within this organization. + - ApiKeyAuth: [] x-oaiMeta: - navigationGroups: - - id: endpoints - title: Endpoints - - id: assistants - title: Assistants - - id: administration - title: Administration - - id: legacy - title: Legacy - groups: - - id: audio - title: Audio - description: "Learn how to turn audio into text or text into audio.\n\nRelated guide: [Speech to text](/docs/guides/speech-to-text)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createSpeech - path: createSpeech - - type: endpoint - key: createTranscription - path: createTranscription - - type: endpoint - key: createTranslation - path: createTranslation - - type: object - key: CreateTranscriptionResponseJson - path: json-object - - type: object - key: CreateTranscriptionResponseVerboseJson - path: verbose-json-object - - id: chat - title: Chat - description: "Given a list of messages comprising a conversation, the model will return a response.\n\nRelated guide: [Chat Completions](/docs/guides/text-generation)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createChatCompletion - path: create - - type: object - key: CreateChatCompletionResponse - path: object - - type: object - key: CreateChatCompletionStreamResponse - path: streaming - - id: embeddings - title: Embeddings - description: "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.\n\nRelated guide: [Embeddings](/docs/guides/embeddings)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createEmbedding - path: create - - type: object - key: Embedding - path: object - - id: fine-tuning - title: Fine-tuning - description: "Manage fine-tuning jobs to tailor a model to your specific training data.\n\nRelated guide: [Fine-tune models](/docs/guides/fine-tuning)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createFineTuningJob - path: create - - type: endpoint - key: listPaginatedFineTuningJobs - path: list - - type: endpoint - key: listFineTuningEvents - path: list-events - - type: endpoint - key: listFineTuningJobCheckpoints - path: list-checkpoints - - type: endpoint - key: retrieveFineTuningJob - path: retrieve - - type: endpoint - key: cancelFineTuningJob - path: cancel - - type: object - key: FinetuneChatRequestInput - path: chat-input - - type: object - key: FinetuneCompletionRequestInput - path: completions-input - - type: object - key: FineTuningJob - path: object - - type: object - key: FineTuningJobEvent - path: event-object - - type: object - key: FineTuningJobCheckpoint - path: checkpoint-object - - id: batch - title: Batch - description: "Create large batches of API requests for asynchronous processing. 
The Batch API returns completions within 24 hours for a 50% discount.\n\nRelated guide: [Batch](/docs/guides/batch)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createBatch - path: create - - type: endpoint - key: retrieveBatch - path: retrieve - - type: endpoint - key: cancelBatch - path: cancel - - type: endpoint - key: listBatches - path: list - - type: object - key: Batch - path: object - - type: object - key: BatchRequestInput - path: request-input - - type: object - key: BatchRequestOutput - path: request-output - - id: files - title: Files - description: "Files are used to upload documents that can be used with features like [Assistants](/docs/api-reference/assistants), [Fine-tuning](/docs/api-reference/fine-tuning), and [Batch API](/docs/guides/batch).\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createFile - path: create - - type: endpoint - key: listFiles - path: list - - type: endpoint - key: retrieveFile - path: retrieve - - type: endpoint - key: deleteFile - path: delete - - type: endpoint - key: downloadFile - path: retrieve-contents - - type: object - key: OpenAIFile - path: object - - id: uploads - title: Uploads - description: "Allows you to upload large files in multiple parts.\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createUpload - path: create - - type: endpoint - key: addUploadPart - path: add-part - - type: endpoint - key: completeUpload - path: complete - - type: endpoint - key: cancelUpload - path: cancel - - type: object - key: Upload - path: object - - type: object - key: UploadPart - path: part-object - - id: images - title: Images - description: "Given a prompt and/or an input image, the model will generate a new image.\n\nRelated guide: [Image generation](/docs/guides/images)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createImage - path: create - - type: endpoint - key: createImageEdit - path: createEdit - - type: endpoint - key: createImageVariation - path: createVariation - - type: object - key: Image - path: object - - id: models - title: Models - description: "List and describe the various models available in the API. 
You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them.\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: listModels - path: list - - type: endpoint - key: retrieveModel - path: retrieve - - type: endpoint - key: deleteModel - path: delete - - type: object - key: Model - path: object - - id: moderations - title: Moderations - description: "Given some input text, outputs if the model classifies it as potentially harmful across several categories.\n\nRelated guide: [Moderations](/docs/guides/moderation)\n" - navigationGroup: endpoints - sections: - - type: endpoint - key: createModeration - path: create - - type: object - key: CreateModerationResponse - path: object - - id: assistants - title: Assistants - beta: true - description: "Build assistants that can call models and use tools to perform tasks.\n\n[Get started with the Assistants API](/docs/assistants)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createAssistant - path: createAssistant - - type: endpoint - key: listAssistants - path: listAssistants - - type: endpoint - key: getAssistant - path: getAssistant - - type: endpoint - key: modifyAssistant - path: modifyAssistant - - type: endpoint - key: deleteAssistant - path: deleteAssistant - - type: object - key: AssistantObject - path: object - - id: threads - title: Threads - beta: true - description: "Create threads that assistants can interact with.\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createThread - path: createThread - - type: endpoint - key: getThread - path: getThread - - type: endpoint - key: modifyThread - path: modifyThread - - type: endpoint - key: deleteThread - path: deleteThread - - type: object - key: ThreadObject - path: object - - id: messages - title: Messages - beta: true - description: "Create messages within threads\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createMessage - path: createMessage - - type: endpoint - key: listMessages - path: listMessages - - type: endpoint - key: getMessage - path: getMessage - - type: endpoint - key: modifyMessage - path: modifyMessage - - type: endpoint - key: deleteMessage - path: deleteMessage - - type: object - key: MessageObject - path: object - - id: runs - title: Runs - beta: true - description: "Represents an execution run on a thread.\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createRun - path: createRun - - type: endpoint - key: createThreadAndRun - path: createThreadAndRun - - type: endpoint - key: listRuns - path: listRuns - - type: endpoint - key: getRun - path: getRun - - type: endpoint - key: modifyRun - path: modifyRun - - type: endpoint - key: submitToolOuputsToRun - path: submitToolOutputs - - type: endpoint - key: cancelRun - path: cancelRun - - type: object - key: RunObject - path: object - - id: run-steps - title: Run Steps - beta: true - description: "Represents the steps (model and tool calls) taken during the run.\n\nRelated guide: [Assistants](/docs/assistants/overview)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: listRunSteps - path: listRunSteps - - type: endpoint - key: getRunStep - path: getRunStep - - type: object - key: RunStepObject - path: step-object - - id: vector-stores - title: Vector Stores - 
beta: true - description: "Vector stores are used to store files for use by the `file_search` tool.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createVectorStore - path: create - - type: endpoint - key: listVectorStores - path: list - - type: endpoint - key: getVectorStore - path: retrieve - - type: endpoint - key: modifyVectorStore - path: modify - - type: endpoint - key: deleteVectorStore - path: delete - - type: object - key: VectorStoreObject - path: object - - id: vector-stores-files - title: Vector Store Files - beta: true - description: "Vector store files represent files inside a vector store.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createVectorStoreFile - path: createFile - - type: endpoint - key: listVectorStoreFiles - path: listFiles - - type: endpoint - key: getVectorStoreFile - path: getFile - - type: endpoint - key: deleteVectorStoreFile - path: deleteFile - - type: object - key: VectorStoreFileObject - path: file-object - - id: vector-stores-file-batches - title: Vector Store File Batches - beta: true - description: "Vector store file batches represent operations to add multiple files to a vector store.\n\nRelated guide: [File Search](/docs/assistants/tools/file-search)\n" - navigationGroup: assistants - sections: - - type: endpoint - key: createVectorStoreFileBatch - path: createBatch - - type: endpoint - key: getVectorStoreFileBatch - path: getBatch - - type: endpoint - key: cancelVectorStoreFileBatch - path: cancelBatch - - type: endpoint - key: listFilesInVectorStoreBatch - path: listBatchFiles - - type: object - key: VectorStoreFileBatchObject - path: batch-object - - id: assistants-streaming - title: Streaming - beta: true - description: "Stream the result of executing a Run or resuming a Run after submitting tool outputs.\n\nYou can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun),\n[Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs)\nendpoints by passing `\"stream\": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream.\n\nOur Node and Python SDKs provide helpful utilities to make streaming easy. Reference the\n[Assistants API quickstart](/docs/assistants/overview) to learn more.\n" - navigationGroup: assistants - sections: - - type: object - key: MessageDeltaObject - path: message-delta-object - - type: object - key: RunStepDeltaObject - path: run-step-delta-object - - type: object - key: AssistantStreamEvent - path: events - - id: administration - title: Overview - description: "Programmatically manage your organization. \n\nThe Audit Logs endpoint provides a log of all actions taken in the \norganization for security and monitoring purposes.\n\nTo access these endpoints please generate an Admin API Key through the [API Platform Organization overview](/organization/admin-keys). Admin API keys cannot be used for non-administration endpoints.\n\nFor best practices on setting up your organization, please refer to this [guide](/docs/guides/production-best-practices/setting-up-your-organization)\n" - navigationGroup: administration - - id: invite - title: Invites - description: Invite and manage invitations for an organization. 
Invited users are automatically added to the Default project. - navigationGroup: administration - sections: - - type: endpoint - key: list-invites - path: list - - type: endpoint - key: inviteUser - path: create - - type: endpoint - key: retrieve-invite - path: retrieve - - type: endpoint - key: delete-invite - path: delete - - type: object - key: Invite - path: object - - id: users - title: Users - description: "Manage users and their role in an organization. Users will be automatically added to the Default project.\n" - navigationGroup: administration - sections: - - type: endpoint - key: list-users - path: list - - type: endpoint - key: modify-user - path: modify - - type: endpoint - key: retrieve-user - path: retrieve - - type: endpoint - key: delete-user - path: delete - - type: object - key: User - path: object - - id: projects - title: Projects - description: "Manage the projects within an orgnanization includes creation, updating, and archiving or projects. \nThe Default project cannot be modified or archived. \n" - navigationGroup: administration - sections: - - type: endpoint - key: list-projects - path: list - - type: endpoint - key: create-project - path: create - - type: endpoint - key: retrieve-project - path: retrieve - - type: endpoint - key: modify-project - path: modify - - type: endpoint - key: archive-project - path: archive - - type: object - key: Project - path: object - - id: project-users - title: Project Users - description: "Manage users within a project, including adding, updating roles, and removing users. \nUsers cannot be removed from the Default project, unless they are being removed from the organization. \n" - navigationGroup: administration - sections: - - type: endpoint - key: list-project-users - path: list - - type: endpoint - key: create-project-user - path: creeate - - type: endpoint - key: retrieve-project-user - path: retrieve - - type: endpoint - key: modify-project-user - path: modify - - type: endpoint - key: delete-project-user - path: delete - - type: object - key: ProjectUser - path: object - - id: project-service-accounts - title: Project Service Accounts - description: "Manage service accounts within a project. A service account is a bot user that is not associated with a user. \nIf a user leaves an organization, their keys and membership in projects will no longer work. Service accounts \ndo not have this limitation. However, service accounts can also be deleted from a project.\n" - navigationGroup: administration - sections: - - type: endpoint - key: list-project-service-accounts - path: list - - type: endpoint - key: create-project-service-account - path: create - - type: endpoint - key: retrieve-project-service-account - path: retrieve - - type: endpoint - key: delete-project-service-account - path: delete - - type: object - key: ProjectServiceAccount - path: object - - id: project-api-keys - title: Project API Keys - description: "Manage API keys for a given project. Supports listing and deleting keys for users. \nThis API does not allow issuing keys for users, as users need to authorize themselves to generate keys. \n" - navigationGroup: administration - sections: - - type: endpoint - key: list-project-api-keys - path: list - - type: endpoint - key: retrieve-project-api-key - path: retrieve - - type: endpoint - key: delete-project-api-key - path: delete - - type: object - key: ProjectApiKey - path: object - - id: audit-logs - title: Audit Logs - description: "Logs of user actions and configuration changes within this organization. 
\n\nTo log events, you must activate logging in the [Organization Settings](/settings/organization/general). \nOnce activated, for security reasons, logging cannot be deactivated.\n" - navigationGroup: administration - sections: - - type: endpoint - key: list-audit-logs - path: list - - type: object - key: AuditLog - path: object - - id: completions - title: Completions - legacy: true - navigationGroup: legacy - description: "Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developer should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models.\n" - sections: - - type: endpoint - key: createCompletion - path: create - - type: object - key: CreateCompletionResponse - path: object \ No newline at end of file + navigationGroups: + - id: endpoints + title: Endpoints + - id: assistants + title: Assistants + beta: true + - id: administration + title: Administration + - id: realtime + title: Realtime + beta: true + - id: legacy + title: Legacy + groups: + - id: audio + title: Audio + description: | + Learn how to turn audio into text or text into audio. + + Related guide: [Speech to text](/docs/guides/speech-to-text) + navigationGroup: endpoints + sections: + - type: endpoint + key: createSpeech + path: createSpeech + - type: endpoint + key: createTranscription + path: createTranscription + - type: endpoint + key: createTranslation + path: createTranslation + - type: object + key: CreateTranscriptionResponseJson + path: json-object + - type: object + key: CreateTranscriptionResponseVerboseJson + path: verbose-json-object + - id: chat + title: Chat + description: > + Given a list of messages comprising a conversation, the model will + return a response. + + Related guide: [Chat Completions](/docs/guides/text-generation) + navigationGroup: endpoints + sections: + - type: endpoint + key: createChatCompletion + path: create + - type: object + key: CreateChatCompletionResponse + path: object + - type: object + key: CreateChatCompletionStreamResponse + path: streaming + - id: embeddings + title: Embeddings + description: > + Get a vector representation of a given input that can be easily consumed + by machine learning models and algorithms. + + Related guide: [Embeddings](/docs/guides/embeddings) + navigationGroup: endpoints + sections: + - type: endpoint + key: createEmbedding + path: create + - type: object + key: Embedding + path: object + - id: fine-tuning + title: Fine-tuning + description: > + Manage fine-tuning jobs to tailor a model to your specific training + data. 
+ + Related guide: [Fine-tune models](/docs/guides/fine-tuning) + navigationGroup: endpoints + sections: + - type: endpoint + key: createFineTuningJob + path: create + - type: endpoint + key: listPaginatedFineTuningJobs + path: list + - type: endpoint + key: listFineTuningEvents + path: list-events + - type: endpoint + key: listFineTuningJobCheckpoints + path: list-checkpoints + - type: endpoint + key: retrieveFineTuningJob + path: retrieve + - type: endpoint + key: cancelFineTuningJob + path: cancel + - type: object + key: FinetuneChatRequestInput + path: chat-input + - type: object + key: FinetuneCompletionRequestInput + path: completions-input + - type: object + key: FineTuningJob + path: object + - type: object + key: FineTuningJobEvent + path: event-object + - type: object + key: FineTuningJobCheckpoint + path: checkpoint-object + - id: batch + title: Batch + description: > + Create large batches of API requests for asynchronous processing. The + Batch API returns completions within 24 hours for a 50% discount. + + Related guide: [Batch](/docs/guides/batch) + navigationGroup: endpoints + sections: + - type: endpoint + key: createBatch + path: create + - type: endpoint + key: retrieveBatch + path: retrieve + - type: endpoint + key: cancelBatch + path: cancel + - type: endpoint + key: listBatches + path: list + - type: object + key: Batch + path: object + - type: object + key: BatchRequestInput + path: request-input + - type: object + key: BatchRequestOutput + path: request-output + - id: files + title: Files + description: > + Files are used to upload documents that can be used with features like + [Assistants](/docs/api-reference/assistants), + [Fine-tuning](/docs/api-reference/fine-tuning), and [Batch + API](/docs/guides/batch). + navigationGroup: endpoints + sections: + - type: endpoint + key: createFile + path: create + - type: endpoint + key: listFiles + path: list + - type: endpoint + key: retrieveFile + path: retrieve + - type: endpoint + key: deleteFile + path: delete + - type: endpoint + key: downloadFile + path: retrieve-contents + - type: object + key: OpenAIFile + path: object + - id: uploads + title: Uploads + description: | + Allows you to upload large files in multiple parts. + navigationGroup: endpoints + sections: + - type: endpoint + key: createUpload + path: create + - type: endpoint + key: addUploadPart + path: add-part + - type: endpoint + key: completeUpload + path: complete + - type: endpoint + key: cancelUpload + path: cancel + - type: object + key: Upload + path: object + - type: object + key: UploadPart + path: part-object + - id: images + title: Images + description: > + Given a prompt and/or an input image, the model will generate a new + image. + + Related guide: [Image generation](/docs/guides/images) + navigationGroup: endpoints + sections: + - type: endpoint + key: createImage + path: create + - type: endpoint + key: createImageEdit + path: createEdit + - type: endpoint + key: createImageVariation + path: createVariation + - type: object + key: Image + path: object + - id: models + title: Models + description: > + List and describe the various models available in the API. You can refer + to the [Models](/docs/models) documentation to understand what models + are available and the differences between them. 
+ navigationGroup: endpoints + sections: + - type: endpoint + key: listModels + path: list + - type: endpoint + key: retrieveModel + path: retrieve + - type: endpoint + key: deleteModel + path: delete + - type: object + key: Model + path: object + - id: moderations + title: Moderations + description: > + Given text and/or image inputs, classifies if those inputs are + potentially harmful across several categories. + + Related guide: [Moderations](/docs/guides/moderation) + navigationGroup: endpoints + sections: + - type: endpoint + key: createModeration + path: create + - type: object + key: CreateModerationResponse + path: object + - id: assistants + title: Assistants + beta: true + description: | + Build assistants that can call models and use tools to perform tasks. + + [Get started with the Assistants API](/docs/assistants) + navigationGroup: assistants + sections: + - type: endpoint + key: createAssistant + path: createAssistant + - type: endpoint + key: listAssistants + path: listAssistants + - type: endpoint + key: getAssistant + path: getAssistant + - type: endpoint + key: modifyAssistant + path: modifyAssistant + - type: endpoint + key: deleteAssistant + path: deleteAssistant + - type: object + key: AssistantObject + path: object + - id: threads + title: Threads + beta: true + description: | + Create threads that assistants can interact with. + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createThread + path: createThread + - type: endpoint + key: getThread + path: getThread + - type: endpoint + key: modifyThread + path: modifyThread + - type: endpoint + key: deleteThread + path: deleteThread + - type: object + key: ThreadObject + path: object + - id: messages + title: Messages + beta: true + description: | + Create messages within threads + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createMessage + path: createMessage + - type: endpoint + key: listMessages + path: listMessages + - type: endpoint + key: getMessage + path: getMessage + - type: endpoint + key: modifyMessage + path: modifyMessage + - type: endpoint + key: deleteMessage + path: deleteMessage + - type: object + key: MessageObject + path: object + - id: runs + title: Runs + beta: true + description: | + Represents an execution run on a thread. + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createRun + path: createRun + - type: endpoint + key: createThreadAndRun + path: createThreadAndRun + - type: endpoint + key: listRuns + path: listRuns + - type: endpoint + key: getRun + path: getRun + - type: endpoint + key: modifyRun + path: modifyRun + - type: endpoint + key: submitToolOuputsToRun + path: submitToolOutputs + - type: endpoint + key: cancelRun + path: cancelRun + - type: object + key: RunObject + path: object + - id: run-steps + title: Run steps + beta: true + description: | + Represents the steps (model and tool calls) taken during the run. + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: listRunSteps + path: listRunSteps + - type: endpoint + key: getRunStep + path: getRunStep + - type: object + key: RunStepObject + path: step-object + - id: vector-stores + title: Vector stores + beta: true + description: | + Vector stores are used to store files for use by the `file_search` tool. 
+ + Related guide: [File Search](/docs/assistants/tools/file-search) + navigationGroup: assistants + sections: + - type: endpoint + key: createVectorStore + path: create + - type: endpoint + key: listVectorStores + path: list + - type: endpoint + key: getVectorStore + path: retrieve + - type: endpoint + key: modifyVectorStore + path: modify + - type: endpoint + key: deleteVectorStore + path: delete + - type: object + key: VectorStoreObject + path: object + - id: vector-stores-files + title: Vector store files + beta: true + description: | + Vector store files represent files inside a vector store. + + Related guide: [File Search](/docs/assistants/tools/file-search) + navigationGroup: assistants + sections: + - type: endpoint + key: createVectorStoreFile + path: createFile + - type: endpoint + key: listVectorStoreFiles + path: listFiles + - type: endpoint + key: getVectorStoreFile + path: getFile + - type: endpoint + key: deleteVectorStoreFile + path: deleteFile + - type: object + key: VectorStoreFileObject + path: file-object + - id: vector-stores-file-batches + title: Vector store file batches + beta: true + description: > + Vector store file batches represent operations to add multiple files to + a vector store. + + Related guide: [File Search](/docs/assistants/tools/file-search) + navigationGroup: assistants + sections: + - type: endpoint + key: createVectorStoreFileBatch + path: createBatch + - type: endpoint + key: getVectorStoreFileBatch + path: getBatch + - type: endpoint + key: cancelVectorStoreFileBatch + path: cancelBatch + - type: endpoint + key: listFilesInVectorStoreBatch + path: listBatchFiles + - type: object + key: VectorStoreFileBatchObject + path: batch-object + - id: assistants-streaming + title: Streaming + beta: true + description: > + Stream the result of executing a Run or resuming a Run after submitting + tool outputs. + + You can stream events from the [Create Thread and + Run](/docs/api-reference/runs/createThreadAndRun), + + [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool + Outputs](/docs/api-reference/runs/submitToolOutputs) + + endpoints by passing `"stream": true`. The response will be a + [Server-Sent + events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) + stream. + + Our Node and Python SDKs provide helpful utilities to make streaming + easy. Reference the + + [Assistants API quickstart](/docs/assistants/overview) to learn more. + navigationGroup: assistants + sections: + - type: object + key: MessageDeltaObject + path: message-delta-object + - type: object + key: RunStepDeltaObject + path: run-step-delta-object + - type: object + key: AssistantStreamEvent + path: events + - id: administration + title: Administration + description: > + Programmatically manage your organization. + + The Audit Logs endpoint provides a log of all actions taken in + the organization for security and monitoring purposes. + + To access these endpoints please generate an Admin API Key through the + [API Platform Organization overview](/organization/admin-keys). Admin + API keys cannot be used for non-administration endpoints. + + For best practices on setting up your organization, please refer to this + [guide](/docs/guides/production-best-practices#setting-up-your-organization) + navigationGroup: administration + - id: invite + title: Invites + description: Invite and manage invitations for an organization. Invited users + are automatically added to the Default project. 
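The administration overview above notes that these endpoints require an Admin API key rather than a project key. A hedged sketch of calling one of them, listing organization users; the `/v1/organization/users` path and the `OPENAI_ADMIN_KEY` variable name are assumptions, while the response fields follow the User and UserListResponse schemas earlier in this file.

```python
# Hypothetical sketch: list organization users with an Admin API key.
import os
import requests

resp = requests.get(
    "https://api.openai.com/v1/organization/users",  # assumed path
    headers={"Authorization": f"Bearer {os.environ['OPENAI_ADMIN_KEY']}"},
    params={"limit": 20},
).json()

for user in resp["data"]:  # organization.user objects (see the User schema above)
    print(user["id"], user["email"], user["role"])
```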
+ navigationGroup: administration + sections: + - type: endpoint + key: list-invites + path: list + - type: endpoint + key: inviteUser + path: create + - type: endpoint + key: retrieve-invite + path: retrieve + - type: endpoint + key: delete-invite + path: delete + - type: object + key: Invite + path: object + - id: users + title: Users + description: > + Manage users and their role in an organization. Users will be + automatically added to the Default project. + navigationGroup: administration + sections: + - type: endpoint + key: list-users + path: list + - type: endpoint + key: modify-user + path: modify + - type: endpoint + key: retrieve-user + path: retrieve + - type: endpoint + key: delete-user + path: delete + - type: object + key: User + path: object + - id: projects + title: Projects + description: > + Manage the projects within an organization, including creation, updating, + and archiving of projects. + + The Default project cannot be modified or archived. + navigationGroup: administration + sections: + - type: endpoint + key: list-projects + path: list + - type: endpoint + key: create-project + path: create + - type: endpoint + key: retrieve-project + path: retrieve + - type: endpoint + key: modify-project + path: modify + - type: endpoint + key: archive-project + path: archive + - type: object + key: Project + path: object + - id: project-users + title: Project users + description: > + Manage users within a project, including adding, updating roles, and + removing users. + + Users cannot be removed from the Default project, unless they are being + removed from the organization. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-users + path: list + - type: endpoint + key: create-project-user + path: creeate + - type: endpoint + key: retrieve-project-user + path: retrieve + - type: endpoint + key: modify-project-user + path: modify + - type: endpoint + key: delete-project-user + path: delete + - type: object + key: ProjectUser + path: object + - id: project-service-accounts + title: Project service accounts + description: > + Manage service accounts within a project. A service account is a bot + user that is not associated with a user. + + If a user leaves an organization, their keys and membership in projects + will no longer work. Service accounts + + do not have this limitation. However, service accounts can also be + deleted from a project. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-service-accounts + path: list + - type: endpoint + key: create-project-service-account + path: create + - type: endpoint + key: retrieve-project-service-account + path: retrieve + - type: endpoint + key: delete-project-service-account + path: delete + - type: object + key: ProjectServiceAccount + path: object + - id: project-api-keys + title: Project API keys + description: > + Manage API keys for a given project. Supports listing and deleting keys + for users. + + This API does not allow issuing keys for users, as users need to + authorize themselves to generate keys. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-api-keys + path: list + - type: endpoint + key: retrieve-project-api-key + path: retrieve + - type: endpoint + key: delete-project-api-key + path: delete + - type: object + key: ProjectApiKey + path: object + - id: project-rate-limits + title: Project rate limits + description: > + Manage rate limits per model for projects.
Rate limits may be configured + to be equal to or lower than the organization's rate limits. + navigationGroup: administration + sections: + - type: endpoint + key: list-project-rate-limits + path: list + - type: endpoint + key: update-project-rate-limits + path: update + - type: object + key: ProjectRateLimit + path: object + - id: audit-logs + title: Audit logs + description: > + Logs of user actions and configuration changes within this + organization. + + To log events, you must activate logging in the [Organization + Settings](/settings/organization/general). + + Once activated, for security reasons, logging cannot be deactivated. + navigationGroup: administration + sections: + - type: endpoint + key: list-audit-logs + path: list + - type: object + key: AuditLog + path: object + - id: usage + title: Usage + description: > + The **Usage API** provides detailed insights into your activity across + the OpenAI API. It also includes a separate [Costs + endpoint](/docs/api-reference/usage/costs), which offers visibility into + your spend, breaking down consumption by invoice line items and project + IDs. + + + While the Usage API delivers granular usage data, it may not always + reconcile perfectly with the Costs due to minor differences in how usage + and spend are recorded. For financial purposes, we recommend using the + [Costs endpoint](/docs/api-reference/usage/costs) or the [Costs + tab](/settings/organization/usage) in the Usage Dashboard, which will + reconcile back to your billing invoice. + navigationGroup: administration + sections: + - type: endpoint + key: usage-completions + path: completions + - type: object + key: UsageCompletionsResult + path: completions_object + - type: endpoint + key: usage-embeddings + path: embeddings + - type: object + key: UsageEmbeddingsResult + path: embeddings_object + - type: endpoint + key: usage-moderations + path: moderations + - type: object + key: UsageModerationsResult + path: moderations_object + - type: endpoint + key: usage-images + path: images + - type: object + key: UsageImagesResult + path: images_object + - type: endpoint + key: usage-audio-speeches + path: audio_speeches + - type: object + key: UsageAudioSpeechesResult + path: audio_speeches_object + - type: endpoint + key: usage-audio-transcriptions + path: audio_transcriptions + - type: object + key: UsageAudioTranscriptionsResult + path: audio_transcriptions_object + - type: endpoint + key: usage-vector-stores + path: vector_stores + - type: object + key: UsageVectorStoresResult + path: vector_stores_object + - type: endpoint + key: usage-code-interpreter-sessions + path: code_interpreter_sessions + - type: object + key: UsageCodeInterpreterSessionsResult + path: code_interpreter_sessions_object + - type: endpoint + key: usage-costs + path: costs + - type: object + key: CostsResult + path: costs_object + - id: realtime + title: Realtime + beta: true + description: > + Communicate with a GPT-4o class model live, in real time, over + WebSocket. + + Produces both audio and text transcriptions. + + [Learn more about the Realtime API](/docs/guides/realtime). + navigationGroup: realtime + - id: realtime-client-events + title: Client events + description: > + These are events that the OpenAI Realtime WebSocket server will accept + from the client. 
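The Realtime client events grouped below are JSON messages sent over a WebSocket, and the server events are JSON messages received back. A rough sketch of that exchange using the third-party `websockets` package; the `wss://api.openai.com/v1/realtime` URL, the model name, the beta header, the `extra_headers` argument (renamed `additional_headers` in newer releases of the package), and the exact event `type` strings are assumptions inferred from the event object names listed here, not taken verbatim from this file.

```python
# Hypothetical sketch of the Realtime client/server event flow described below.
import asyncio
import json
import os
import websockets  # pip install websockets

async def main() -> None:
    url = "wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview"  # assumed
    headers = {
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
        "OpenAI-Beta": "realtime=v1",  # assumed beta header
    }
    async with websockets.connect(url, extra_headers=headers) as ws:
        # Client event mirroring RealtimeClientEventSessionUpdate.
        await ws.send(json.dumps({
            "type": "session.update",
            "session": {"modalities": ["text"], "instructions": "Be terse."},
        }))
        # Client event mirroring RealtimeClientEventResponseCreate.
        await ws.send(json.dumps({"type": "response.create"}))
        # Read server events until the response completes.
        async for raw in ws:
            event = json.loads(raw)
            if event["type"] == "response.text.delta":    # ResponseTextDelta
                print(event["delta"], end="", flush=True)
            elif event["type"] == "response.done":        # ResponseDone
                break

asyncio.run(main())
```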
+ navigationGroup: realtime + sections: + - type: object + key: RealtimeClientEventSessionUpdate + path: + - type: object + key: RealtimeClientEventInputAudioBufferAppend + path: + - type: object + key: RealtimeClientEventInputAudioBufferCommit + path: + - type: object + key: RealtimeClientEventInputAudioBufferClear + path: + - type: object + key: RealtimeClientEventConversationItemCreate + path: + - type: object + key: RealtimeClientEventConversationItemTruncate + path: + - type: object + key: RealtimeClientEventConversationItemDelete + path: + - type: object + key: RealtimeClientEventResponseCreate + path: + - type: object + key: RealtimeClientEventResponseCancel + path: + - id: realtime-server-events + title: Server events + description: > + These are events emitted from the OpenAI Realtime WebSocket server to + the client. + navigationGroup: realtime + sections: + - type: object + key: RealtimeServerEventError + path: + - type: object + key: RealtimeServerEventSessionCreated + path: + - type: object + key: RealtimeServerEventSessionUpdated + path: + - type: object + key: RealtimeServerEventConversationCreated + path: + - type: object + key: RealtimeServerEventConversationItemCreated + path: + - type: object + key: RealtimeServerEventConversationItemInputAudioTranscriptionCompleted + path: + - type: object + key: RealtimeServerEventConversationItemInputAudioTranscriptionFailed + path: + - type: object + key: RealtimeServerEventConversationItemTruncated + path: + - type: object + key: RealtimeServerEventConversationItemDeleted + path: + - type: object + key: RealtimeServerEventInputAudioBufferCommitted + path: + - type: object + key: RealtimeServerEventInputAudioBufferCleared + path: + - type: object + key: RealtimeServerEventInputAudioBufferSpeechStarted + path: + - type: object + key: RealtimeServerEventInputAudioBufferSpeechStopped + path: + - type: object + key: RealtimeServerEventResponseCreated + path: + - type: object + key: RealtimeServerEventResponseDone + path: + - type: object + key: RealtimeServerEventResponseOutputItemAdded + path: + - type: object + key: RealtimeServerEventResponseOutputItemDone + path: + - type: object + key: RealtimeServerEventResponseContentPartAdded + path: + - type: object + key: RealtimeServerEventResponseContentPartDone + path: + - type: object + key: RealtimeServerEventResponseTextDelta + path: + - type: object + key: RealtimeServerEventResponseTextDone + path: + - type: object + key: RealtimeServerEventResponseAudioTranscriptDelta + path: + - type: object + key: RealtimeServerEventResponseAudioTranscriptDone + path: + - type: object + key: RealtimeServerEventResponseAudioDelta + path: + - type: object + key: RealtimeServerEventResponseAudioDone + path: + - type: object + key: RealtimeServerEventResponseFunctionCallArgumentsDelta + path: + - type: object + key: RealtimeServerEventResponseFunctionCallArgumentsDone + path: + - type: object + key: RealtimeServerEventRateLimitsUpdated + path: + - id: completions + title: Completions + legacy: true + navigationGroup: legacy + description: > + Given a prompt, the model will return one or more predicted completions + along with the probabilities of alternative tokens at each position. + Most developers should use our [Chat Completions + API](/docs/guides/text-generation#text-generation-models) to leverage + our best and newest models. + sections: + - type: endpoint + key: createCompletion + path: create + - type: object + key: CreateCompletionResponse + path: object
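The legacy Completions group above recommends the Chat Completions API for new work. A short sketch contrasting the two calls with the official Python SDK; the model names are placeholders, not values taken from this file.

```python
# Hypothetical sketch: legacy Completions vs. the recommended Chat Completions.
from openai import OpenAI

client = OpenAI()

# Legacy: a single prompt string in, completion text out.
legacy = client.completions.create(
    model="gpt-3.5-turbo-instruct",  # placeholder model name
    prompt="Say hello in one word.",
    max_tokens=5,
)
print(legacy.choices[0].text)

# Preferred: Chat Completions with a message list, as recommended above.
chat = client.chat.completions.create(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(chat.choices[0].message.content)
```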