Skip to content

Commit

Permalink
Update openai client generation (#545)
Browse files Browse the repository at this point in the history
  • Loading branch information
fedefernandez authored Nov 17, 2023
1 parent f502e25 commit 91b282c
Show file tree
Hide file tree
Showing 44 changed files with 959 additions and 184 deletions.
11 changes: 11 additions & 0 deletions openai-client/client/.openapi-generator/FILES
Original file line number Diff line number Diff line change
Expand Up @@ -51,37 +51,48 @@ src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantFileRequ
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponse.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponse.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponse.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInner.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInnerLogprobs.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponse.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponseChoicesInner.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponse.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponseUsage.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestHyperparameters.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestHyperparameters.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateImageEditRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateImageRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateImageRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateMessageRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponse.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInner.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategoryScores.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadAndRunRequestToolsInner.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateThreadRequest.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranscriptionRequestModel.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranscriptionResponse.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/CreateTranslationResponse.kt
src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteAssistantFileResponse.kt
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,4 @@ fun <T : Any> FormBuilder.appendGen(key: String, value: T, headers: Headers = He

@OptIn(InternalSerializationApi::class, ExperimentalSerializationApi::class)
fun <T : Enum<T>> serialNameOrEnumValue(v: Enum<T>): String =
v::class.serializerOrNull()?.descriptor?.getElementName(v.ordinal) ?: v.name
v::class.serializerOrNull()?.descriptor?.getElementName(v.ordinal) ?: v.toString()
44 changes: 44 additions & 0 deletions openai-client/client/src/commonMain/kotlin/OpenAIModel.kt
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
package ai.xef.openai

import kotlin.jvm.JvmInline
import kotlinx.serialization.KSerializer
import kotlinx.serialization.Serializable
import kotlinx.serialization.SerializationException
import kotlinx.serialization.builtins.serializer
import kotlinx.serialization.descriptors.SerialDescriptor
import kotlinx.serialization.encoding.Decoder
import kotlinx.serialization.encoding.Encoder

@Serializable(with = OpenAIModelSerializer::class)
sealed interface OpenAIModel<T> {

  /**
   * The wire-level identifier of this model: the raw string for a [CustomModel], or the
   * `toString()` form of the wrapped value for a [StandardModel].
   */
  fun value(): String =
    when (val self = this) {
      is StandardModel -> self.model.toString()
      is CustomModel -> self.model
    }
}

/** An [OpenAIModel] carrying a free-form model id string outside the generated enum [T]. */
@Serializable @JvmInline value class CustomModel<T>(val model: String) : OpenAIModel<T>

/** An [OpenAIModel] wrapping one of the spec-declared model values [T]. */
@Serializable @JvmInline value class StandardModel<T>(val model: T) : OpenAIModel<T>

/**
 * Serializes an [OpenAIModel] transparently as its underlying value: a [StandardModel] is written
 * with [dataSerializer], a [CustomModel] as a plain string.
 *
 * On reads, [dataSerializer] is tried first; if it rejects the payload (via
 * [SerializationException] or [IllegalArgumentException]) the value is kept verbatim as a
 * [CustomModel].
 *
 * NOTE(review): the fallback re-reads from the same [Decoder] after a failed attempt, which
 * assumes the failed decode did not consume the token — looks safe for JSON primitives, but
 * confirm for other formats before relying on it.
 */
class OpenAIModelSerializer<T>(private val dataSerializer: KSerializer<T>) :
  KSerializer<OpenAIModel<T>> {
  // Expose the element's descriptor so the schema is indistinguishable from the plain value.
  override val descriptor: SerialDescriptor = dataSerializer.descriptor

  override fun serialize(encoder: Encoder, value: OpenAIModel<T>) =
    when (value) {
      is CustomModel<T> -> String.serializer().serialize(encoder, value.model)
      is StandardModel<T> -> dataSerializer.serialize(encoder, value.model)
    }

  override fun deserialize(decoder: Decoder) =
    try {
      StandardModel(dataSerializer.deserialize(decoder))
    } catch (e: SerializationException) {
      // Payload is not a known serial name of T -> keep it as a custom model id.
      CustomModel(String.serializer().deserialize(decoder))
    } catch (e: IllegalArgumentException) {
      // Presumably covers decoders that signal unknown enum values with IAE — TODO confirm.
      CustomModel(String.serializer().deserialize(decoder))
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ package com.xebia.functional.openai.apis

import com.xebia.functional.openai.infrastructure.*
import com.xebia.functional.openai.models.CreateSpeechRequest
import com.xebia.functional.openai.models.CreateTranscriptionRequestModel
import com.xebia.functional.openai.models.CreateTranscriptionResponse
import com.xebia.functional.openai.models.CreateTranslationResponse
import io.ktor.client.HttpClient
Expand Down Expand Up @@ -104,8 +105,7 @@ open class AudioApi : ApiClient {
@Suppress("UNCHECKED_CAST")
open suspend fun createTranscription(
file: io.ktor.client.request.forms.InputProvider,
model:
com.xebia.functional.openai.models.ext.transcription.create.CreateTranscriptionRequestModel,
model: CreateTranscriptionRequestModel,
language: kotlin.String? = null,
prompt: kotlin.String? = null,
responseFormat: ResponseFormatCreateTranscription? = ResponseFormatCreateTranscription.json,
Expand Down Expand Up @@ -160,8 +160,7 @@ open class AudioApi : ApiClient {
@Suppress("UNCHECKED_CAST")
open suspend fun createTranslation(
file: io.ktor.client.request.forms.InputProvider,
model:
com.xebia.functional.openai.models.ext.transcription.create.CreateTranscriptionRequestModel,
model: CreateTranscriptionRequestModel,
prompt: kotlin.String? = null,
responseFormat: kotlin.String? = "json",
temperature: kotlin.Double? = 0.toDouble()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
package com.xebia.functional.openai.apis

import com.xebia.functional.openai.infrastructure.*
import com.xebia.functional.openai.models.CreateImageEditRequestModel
import com.xebia.functional.openai.models.CreateImageRequest
import com.xebia.functional.openai.models.ImagesResponse
import io.ktor.client.HttpClient
Expand Down Expand Up @@ -109,8 +110,7 @@ open class ImagesApi : ApiClient {
image: io.ktor.client.request.forms.InputProvider,
prompt: kotlin.String,
mask: io.ktor.client.request.forms.InputProvider? = null,
model: com.xebia.functional.openai.models.ext.image.edit.create.CreateImageEditRequestModel? =
null,
model: CreateImageEditRequestModel? = null,
n: kotlin.Int? = 1,
size: PropertySizeCreateImageEdit? = PropertySizeCreateImageEdit._1024x1024,
responseFormat: ResponseFormatCreateImageEdit? = ResponseFormatCreateImageEdit.url,
Expand Down Expand Up @@ -182,8 +182,7 @@ open class ImagesApi : ApiClient {
@Suppress("UNCHECKED_CAST")
open suspend fun createImageVariation(
image: io.ktor.client.request.forms.InputProvider,
model: com.xebia.functional.openai.models.ext.image.edit.create.CreateImageEditRequestModel? =
null,
model: CreateImageEditRequestModel? = null,
n: kotlin.Int? = 1,
responseFormat: ResponseFormatCreateImageVariation? = ResponseFormatCreateImageVariation.url,
size: PropertySizeCreateImageVariation? = PropertySizeCreateImageVariation._1024x1024,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ import kotlinx.serialization.json.Json

open class ApiClient(private val baseUrl: String) {

private lateinit var client: HttpClient
lateinit var client: HttpClient

constructor(
baseUrl: String,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ data class CreateChatCompletionRequest(
>,
@SerialName(value = "model")
@Required
val model: com.xebia.functional.openai.models.ext.chat.create.CreateChatCompletionRequestModel,
val model: ai.xef.openai.OpenAIModel<CreateChatCompletionRequestModel>,

/* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */
@SerialName(value = "frequency_penalty") val frequencyPenalty: kotlin.Double? = (0).toDouble(),
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
/**
* Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* Do not edit this file manually.
*/
@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")

package com.xebia.functional.openai.models

import kotlinx.serialization.*

/**
 * ID of the model to use. See the
 * [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on
 * which models work with the Chat API.
 *
 * NOTE(review): this enum is produced by OpenAPI Generator ("Do not edit this file manually") —
 * the `Minus`/`Period` entry names are the generator's encoding of `-` and `.` in the wire
 * values; regenerate rather than hand-editing.
 */
@Serializable
enum class CreateChatCompletionRequestModel(val value: kotlin.String) {

  @SerialName(value = "gpt-4-1106-preview") gptMinus4Minus1106MinusPreview("gpt-4-1106-preview"),
  @SerialName(value = "gpt-4-vision-preview")
  gptMinus4MinusVisionMinusPreview("gpt-4-vision-preview"),
  @SerialName(value = "gpt-4") gptMinus4("gpt-4"),
  @SerialName(value = "gpt-4-0314") gptMinus4Minus0314("gpt-4-0314"),
  @SerialName(value = "gpt-4-0613") gptMinus4Minus0613("gpt-4-0613"),
  @SerialName(value = "gpt-4-32k") gptMinus4Minus32k("gpt-4-32k"),
  @SerialName(value = "gpt-4-32k-0314") gptMinus4Minus32kMinus0314("gpt-4-32k-0314"),
  @SerialName(value = "gpt-4-32k-0613") gptMinus4Minus32kMinus0613("gpt-4-32k-0613"),
  @SerialName(value = "gpt-3.5-turbo") gptMinus3Period5MinusTurbo("gpt-3.5-turbo"),
  @SerialName(value = "gpt-3.5-turbo-16k") gptMinus3Period5MinusTurboMinus16k("gpt-3.5-turbo-16k"),
  @SerialName(value = "gpt-3.5-turbo-0301")
  gptMinus3Period5MinusTurboMinus0301("gpt-3.5-turbo-0301"),
  @SerialName(value = "gpt-3.5-turbo-0613")
  gptMinus3Period5MinusTurboMinus0613("gpt-3.5-turbo-0613"),
  @SerialName(value = "gpt-3.5-turbo-16k-0613")
  gptMinus3Period5MinusTurboMinus16kMinus0613("gpt-3.5-turbo-16k-0613");

  /**
   * Override [toString()] to avoid using the enum variable name as the value, and instead use the
   * actual value defined in the API spec file.
   *
   * This solves a problem when the variable name and its value are different, and ensures that the
   * client sends the correct enum values to the server always.
   */
  override fun toString(): kotlin.String = value

  companion object {
    /** Converts the provided [data] to a [String] on success, null otherwise. */
    fun encode(data: kotlin.Any?): kotlin.String? =
      if (data is CreateChatCompletionRequestModel) "$data" else null

    /** Returns a valid [CreateChatCompletionRequestModel] for [data], null otherwise. */
    // Matches either the enum constant itself, or any constant whose string form (the spec
    // value, via toString) equals data.toString() case-insensitively.
    fun decode(data: kotlin.Any?): CreateChatCompletionRequestModel? =
      data?.let {
        val normalizedData = "$it".lowercase()
        values().firstOrNull { value -> it == value || normalizedData == "$value".lowercase() }
      }
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ import kotlinx.serialization.encoding.*
data class CreateCompletionRequest(
@SerialName(value = "model")
@Required
val model: com.xebia.functional.openai.models.ext.completion.create.CreateCompletionRequestModel,
val model: ai.xef.openai.OpenAIModel<CreateCompletionRequestModel>,
@SerialName(value = "prompt")
@Required
val prompt:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
/**
* Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* Do not edit this file manually.
*/
@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")

package com.xebia.functional.openai.models

import kotlinx.serialization.*

/**
 * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see
 * all of your available models, or see our [Model overview](/docs/models/overview) for descriptions
 * of them.
 *
 * NOTE(review): this enum is produced by OpenAPI Generator ("Do not edit this file manually") —
 * the `Minus`/`Period` entry names are the generator's encoding of `-` and `.` in the wire
 * values; regenerate rather than hand-editing.
 */
@Serializable
enum class CreateCompletionRequestModel(val value: kotlin.String) {

  @SerialName(value = "babbage-002") babbageMinus002("babbage-002"),
  @SerialName(value = "davinci-002") davinciMinus002("davinci-002"),
  @SerialName(value = "gpt-3.5-turbo-instruct")
  gptMinus3Period5MinusTurboMinusInstruct("gpt-3.5-turbo-instruct"),
  @SerialName(value = "text-davinci-003") textMinusDavinciMinus003("text-davinci-003"),
  @SerialName(value = "text-davinci-002") textMinusDavinciMinus002("text-davinci-002"),
  @SerialName(value = "text-davinci-001") textMinusDavinciMinus001("text-davinci-001"),
  @SerialName(value = "code-davinci-002") codeMinusDavinciMinus002("code-davinci-002"),
  @SerialName(value = "text-curie-001") textMinusCurieMinus001("text-curie-001"),
  @SerialName(value = "text-babbage-001") textMinusBabbageMinus001("text-babbage-001"),
  @SerialName(value = "text-ada-001") textMinusAdaMinus001("text-ada-001");

  /**
   * Override [toString()] to avoid using the enum variable name as the value, and instead use the
   * actual value defined in the API spec file.
   *
   * This solves a problem when the variable name and its value are different, and ensures that the
   * client sends the correct enum values to the server always.
   */
  override fun toString(): kotlin.String = value

  companion object {
    /** Converts the provided [data] to a [String] on success, null otherwise. */
    fun encode(data: kotlin.Any?): kotlin.String? =
      if (data is CreateCompletionRequestModel) "$data" else null

    /** Returns a valid [CreateCompletionRequestModel] for [data], null otherwise. */
    // Matches either the enum constant itself, or any constant whose string form (the spec
    // value, via toString) equals data.toString() case-insensitively.
    fun decode(data: kotlin.Any?): CreateCompletionRequestModel? =
      data?.let {
        val normalizedData = "$it".lowercase()
        values().firstOrNull { value -> it == value || normalizedData == "$value".lowercase() }
      }
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ data class CreateEditRequest(
@SerialName(value = "instruction") @Required val instruction: kotlin.String,
@SerialName(value = "model")
@Required
val model: com.xebia.functional.openai.models.ext.edit.create.CreateEditRequestModel,
val model: ai.xef.openai.OpenAIModel<CreateEditRequestModel>,

/* The input text to use as a starting point for the edit. */
@SerialName(value = "input") val input: kotlin.String? = "",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
/**
* Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* Do not edit this file manually.
*/
@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")

package com.xebia.functional.openai.models

import kotlinx.serialization.*

/**
 * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model
 * with this endpoint.
 *
 * NOTE(review): this enum is produced by OpenAPI Generator ("Do not edit this file manually") —
 * the `Minus` entry names are the generator's encoding of `-` in the wire values; regenerate
 * rather than hand-editing.
 */
@Serializable
enum class CreateEditRequestModel(val value: kotlin.String) {

  @SerialName(value = "text-davinci-edit-001")
  textMinusDavinciMinusEditMinus001("text-davinci-edit-001"),
  @SerialName(value = "code-davinci-edit-001")
  codeMinusDavinciMinusEditMinus001("code-davinci-edit-001");

  /**
   * Override [toString()] to avoid using the enum variable name as the value, and instead use the
   * actual value defined in the API spec file.
   *
   * This solves a problem when the variable name and its value are different, and ensures that the
   * client sends the correct enum values to the server always.
   */
  override fun toString(): kotlin.String = value

  companion object {
    /** Converts the provided [data] to a [String] on success, null otherwise. */
    fun encode(data: kotlin.Any?): kotlin.String? =
      if (data is CreateEditRequestModel) "$data" else null

    /** Returns a valid [CreateEditRequestModel] for [data], null otherwise. */
    // Matches either the enum constant itself, or any constant whose string form (the spec
    // value, via toString) equals data.toString() case-insensitively.
    fun decode(data: kotlin.Any?): CreateEditRequestModel? =
      data?.let {
        val normalizedData = "$it".lowercase()
        values().firstOrNull { value -> it == value || normalizedData == "$value".lowercase() }
      }
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ data class CreateEmbeddingRequest(
val input: com.xebia.functional.openai.models.ext.embedding.create.CreateEmbeddingRequestInput,
@SerialName(value = "model")
@Required
val model: com.xebia.functional.openai.models.ext.embedding.create.CreateEmbeddingRequestModel,
val model: ai.xef.openai.OpenAIModel<CreateEmbeddingRequestModel>,

/* The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). */
@SerialName(value = "encoding_format")
Expand Down
Loading

0 comments on commit 91b282c

Please sign in to comment.