diff --git a/README.md b/README.md
index a3f8e4d7..8157711d 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,9 @@ NOTES:
* There is https://github.com/openai/openai-java, which OpenAI describes as
"The official Java library for the OpenAI API", but:
1. That "official" library lags behind https://github.com/openai/openai-openapi/blob/master/openapi.yaml
- For example, as of 2025/02/12 is is **STILL** lacking OpenAI's Realtime API (https://platform.openai.com/docs/api-reference/realtime), which is my main use case.
+ For example: OpenAI's Realtime API (https://platform.openai.com/docs/api-reference/realtime),
+ which is my main use case, is in https://github.com/openai/openai-openapi/blob/master/openapi.yaml,
+ but as of 2025/03/28 it is **STILL** not in https://github.com/openai/openai-java. :/
2. `openai-java` is actually a nearly fully modernized Kotlin library, so the name
`openai-java` is legacy;
it really should be named `openai-kotlin`.
@@ -60,6 +62,13 @@ All of my changes can be seen at:
https://github.com/swooby/openai-openapi-kotlin/pull/1/files
## Updates
+The update process is very similar to the original generation.
+
+It usually takes me 1-2 hours to do this.
+More if there are more changes.
+Less if there are fewer changes.
+Keep in mind that some of this time goes to verifying/updating the documentation of changes below.
+
When a new spec comes out:
1. Make sure to start from a fresh/stashed checkout.
2. `rm -r ./lib/src`
diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml
index edd5a183..aaa9d06e 100644
--- a/gradle/libs.versions.toml
+++ b/gradle/libs.versions.toml
@@ -3,6 +3,7 @@ kotlin = "2.0.21"
kotlintestRunnerJunit5 = "3.4.2"
squareupMoshiKotlin = "1.15.1"
squareupOkhttpBom = "4.12.0"
+spotless = "7.0.2"
[libraries]
kotlintest-runner-junit5 = { module = "io.kotlintest:kotlintest-runner-junit5", version.ref = "kotlintestRunnerJunit5" }
@@ -12,3 +13,4 @@ squareup-okhttp3 = { module = "com.squareup.okhttp3:okhttp" }
[plugins]
kotlin-jvm = { id = "org.jetbrains.kotlin.jvm", version.ref = "kotlin" }
+spotless = { id = "com.diffplug.spotless", version.ref = "spotless" }
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 6059a5fa..cea7a793 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,6 +1,7 @@
-#Sun Dec 15 17:37:56 PST 2024
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.12-bin.zip
+networkTimeout=10000
+validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
diff --git a/lib/README.md b/lib/README.md
index 00d5c037..ce2dbd8c 100644
--- a/lib/README.md
+++ b/lib/README.md
@@ -7,7 +7,7 @@ This API client was generated by the [OpenAPI Generator](https://openapi-generat
- API version: 2.3.0
- Package version:
-- Generator version: 7.10.0
+- Generator version: 7.12.0
- Build package: org.openapitools.codegen.languages.KotlinClientCodegen
For more information, please visit [https://help.openai.com/](https://help.openai.com/)
@@ -77,7 +77,12 @@ All URIs are relative to *https://api.openai.com/v1*
| *BatchApi* | [**createBatch**](docs/BatchApi.md#createbatch) | **POST** /batches | Creates and executes a batch from an uploaded file of requests |
| *BatchApi* | [**listBatches**](docs/BatchApi.md#listbatches) | **GET** /batches | List your organization's batches. |
| *BatchApi* | [**retrieveBatch**](docs/BatchApi.md#retrievebatch) | **GET** /batches/{batch_id} | Retrieves a batch. |
-| *ChatApi* | [**createChatCompletion**](docs/ChatApi.md#createchatcompletion) | **POST** /chat/completions | Creates a model response for the given chat conversation. Learn more in the [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), and [audio](/docs/guides/audio) guides. Parameter support can differ depending on the model used to generate the response, particularly for newer reasoning models. Parameters that are only supported for reasoning models are noted below. For the current state of unsupported parameters in reasoning models, [refer to the reasoning guide](/docs/guides/reasoning). |
+| *ChatApi* | [**createChatCompletion**](docs/ChatApi.md#createchatcompletion) | **POST** /chat/completions | **Starting a new project?** We recommend trying [Responses](/docs/api-reference/responses) to take advantage of the latest OpenAI platform features. Compare [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses). --- Creates a model response for the given chat conversation. Learn more in the [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), and [audio](/docs/guides/audio) guides. Parameter support can differ depending on the model used to generate the response, particularly for newer reasoning models. Parameters that are only supported for reasoning models are noted below. For the current state of unsupported parameters in reasoning models, [refer to the reasoning guide](/docs/guides/reasoning). |
+| *ChatApi* | [**deleteChatCompletion**](docs/ChatApi.md#deletechatcompletion) | **DELETE** /chat/completions/{completion_id} | Delete a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` can be deleted. |
+| *ChatApi* | [**getChatCompletion**](docs/ChatApi.md#getchatcompletion) | **GET** /chat/completions/{completion_id} | Get a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` will be returned. |
+| *ChatApi* | [**getChatCompletionMessages**](docs/ChatApi.md#getchatcompletionmessages) | **GET** /chat/completions/{completion_id}/messages | Get the messages in a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` will be returned. |
+| *ChatApi* | [**listChatCompletions**](docs/ChatApi.md#listchatcompletions) | **GET** /chat/completions | List stored Chat Completions. Only Chat Completions that have been stored with the `store` parameter set to `true` will be returned. |
+| *ChatApi* | [**updateChatCompletion**](docs/ChatApi.md#updatechatcompletion) | **POST** /chat/completions/{completion_id} | Modify a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` can be modified. Currently, the only supported modification is to update the `metadata` field. |
| *CompletionsApi* | [**createCompletion**](docs/CompletionsApi.md#createcompletion) | **POST** /completions | Creates a completion for the provided prompt and parameters. |
| *DefaultApi* | [**adminApiKeysCreate**](docs/DefaultApi.md#adminapikeyscreate) | **POST** /organization/admin_api_keys | Create an organization admin API key |
| *DefaultApi* | [**adminApiKeysDelete**](docs/DefaultApi.md#adminapikeysdelete) | **DELETE** /organization/admin_api_keys/{key_id} | Delete an organization admin API key |
@@ -90,7 +95,10 @@ All URIs are relative to *https://api.openai.com/v1*
| *FilesApi* | [**listFiles**](docs/FilesApi.md#listfiles) | **GET** /files | Returns a list of files. |
| *FilesApi* | [**retrieveFile**](docs/FilesApi.md#retrievefile) | **GET** /files/{file_id} | Returns information about a specific file. |
| *FineTuningApi* | [**cancelFineTuningJob**](docs/FineTuningApi.md#cancelfinetuningjob) | **POST** /fine_tuning/jobs/{fine_tuning_job_id}/cancel | Immediately cancel a fine-tune job. |
+| *FineTuningApi* | [**createFineTuningCheckpointPermission**](docs/FineTuningApi.md#createfinetuningcheckpointpermission) | **POST** /fine_tuning/checkpoints/{permission_id}/permissions | **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). This enables organization owners to share fine-tuned models with other projects in their organization. |
| *FineTuningApi* | [**createFineTuningJob**](docs/FineTuningApi.md#createfinetuningjob) | **POST** /fine_tuning/jobs | Creates a fine-tuning job which begins the process of creating a new model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about fine-tuning](/docs/guides/fine-tuning) |
+| *FineTuningApi* | [**deleteFineTuningCheckpointPermission**](docs/FineTuningApi.md#deletefinetuningcheckpointpermission) | **DELETE** /fine_tuning/checkpoints/{permission_id}/permissions | **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). Organization owners can use this endpoint to delete a permission for a fine-tuned model checkpoint. |
+| *FineTuningApi* | [**listFineTuningCheckpointPermissions**](docs/FineTuningApi.md#listfinetuningcheckpointpermissions) | **GET** /fine_tuning/checkpoints/{permission_id}/permissions | **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). Organization owners can use this endpoint to view all permissions for a fine-tuned model checkpoint. |
| *FineTuningApi* | [**listFineTuningEvents**](docs/FineTuningApi.md#listfinetuningevents) | **GET** /fine_tuning/jobs/{fine_tuning_job_id}/events | Get status updates for a fine-tuning job. |
| *FineTuningApi* | [**listFineTuningJobCheckpoints**](docs/FineTuningApi.md#listfinetuningjobcheckpoints) | **GET** /fine_tuning/jobs/{fine_tuning_job_id}/checkpoints | List checkpoints for a fine-tuning job. |
| *FineTuningApi* | [**listPaginatedFineTuningJobs**](docs/FineTuningApi.md#listpaginatedfinetuningjobs) | **GET** /fine_tuning/jobs | List your organization's fine-tuning jobs |
@@ -126,10 +134,15 @@ All URIs are relative to *https://api.openai.com/v1*
| *ProjectsApi* | [**retrieveProjectUser**](docs/ProjectsApi.md#retrieveprojectuser) | **GET** /organization/projects/{project_id}/users/{user_id} | Retrieves a user in the project. |
| *ProjectsApi* | [**updateProjectRateLimits**](docs/ProjectsApi.md#updateprojectratelimits) | **POST** /organization/projects/{project_id}/rate_limits/{rate_limit_id} | Updates a project rate limit. |
| *RealtimeApi* | [**createRealtimeSession**](docs/RealtimeApi.md#createrealtimesession) | **POST** /realtime/sessions | Create an ephemeral API token for use in client-side applications with the Realtime API. Can be configured with the same session parameters as the `session.update` client event. It responds with a session object, plus a `client_secret` key which contains a usable ephemeral API token that can be used to authenticate browser clients for the Realtime API. |
+| *RealtimeApi* | [**createRealtimeTranscriptionSession**](docs/RealtimeApi.md#createrealtimetranscriptionsession) | **POST** /realtime/transcription_sessions | Create an ephemeral API token for use in client-side applications with the Realtime API specifically for realtime transcriptions. Can be configured with the same session parameters as the `transcription_session.update` client event. It responds with a session object, plus a `client_secret` key which contains a usable ephemeral API token that can be used to authenticate browser clients for the Realtime API. |
+| *ResponsesApi* | [**createResponse**](docs/ResponsesApi.md#createresponse) | **POST** /responses | Creates a model response. Provide [text](/docs/guides/text) or [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own [custom code](/docs/guides/function-calling) or use built-in [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or [file search](/docs/guides/tools-file-search) to use your own data as input for the model's response. |
+| *ResponsesApi* | [**deleteResponse**](docs/ResponsesApi.md#deleteresponse) | **DELETE** /responses/{response_id} | Deletes a model response with the given ID. |
+| *ResponsesApi* | [**getResponse**](docs/ResponsesApi.md#getresponse) | **GET** /responses/{response_id} | Retrieves a model response with the given ID. |
+| *ResponsesApi* | [**listInputItems**](docs/ResponsesApi.md#listinputitems) | **GET** /responses/{response_id}/input_items | Returns a list of input items for a given response. |
| *UploadsApi* | [**addUploadPart**](docs/UploadsApi.md#adduploadpart) | **POST** /uploads/{upload_id}/parts | Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). |
| *UploadsApi* | [**cancelUpload**](docs/UploadsApi.md#cancelupload) | **POST** /uploads/{upload_id}/cancel | Cancels the Upload. No Parts may be added after an Upload is cancelled. |
| *UploadsApi* | [**completeUpload**](docs/UploadsApi.md#completeupload) | **POST** /uploads/{upload_id}/complete | Completes the [Upload](/docs/api-reference/uploads/object). Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. You can specify the order of the Parts by passing in an ordered list of the Part IDs. The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. |
-| *UploadsApi* | [**createUpload**](docs/UploadsApi.md#createupload) | **POST** /uploads | Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: - [Assistants](/docs/assistants/tools/file-search#supported-files) For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). |
+| *UploadsApi* | [**createUpload**](docs/UploadsApi.md#createupload) | **POST** /uploads | Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. For certain `purpose` values, the correct `mime_type` must be specified. Please refer to documentation for the [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files). For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). |
| *UsageApi* | [**usageAudioSpeeches**](docs/UsageApi.md#usageaudiospeeches) | **GET** /organization/usage/audio_speeches | Get audio speeches usage details for the organization. |
| *UsageApi* | [**usageAudioTranscriptions**](docs/UsageApi.md#usageaudiotranscriptions) | **GET** /organization/usage/audio_transcriptions | Get audio transcriptions usage details for the organization. |
| *UsageApi* | [**usageCodeInterpreterSessions**](docs/UsageApi.md#usagecodeinterpretersessions) | **GET** /organization/usage/code_interpreter_sessions | Get code interpreter sessions usage details for the organization. |
@@ -156,6 +169,9 @@ All URIs are relative to *https://api.openai.com/v1*
| *VectorStoresApi* | [**listVectorStoreFiles**](docs/VectorStoresApi.md#listvectorstorefiles) | **GET** /vector_stores/{vector_store_id}/files | Returns a list of vector store files. |
| *VectorStoresApi* | [**listVectorStores**](docs/VectorStoresApi.md#listvectorstores) | **GET** /vector_stores | Returns a list of vector stores. |
| *VectorStoresApi* | [**modifyVectorStore**](docs/VectorStoresApi.md#modifyvectorstore) | **POST** /vector_stores/{vector_store_id} | Modifies a vector store. |
+| *VectorStoresApi* | [**retrieveVectorStoreFileContent**](docs/VectorStoresApi.md#retrievevectorstorefilecontent) | **GET** /vector_stores/{vector_store_id}/files/{file_id}/content | Retrieve the parsed contents of a vector store file. |
+| *VectorStoresApi* | [**searchVectorStore**](docs/VectorStoresApi.md#searchvectorstore) | **POST** /vector_stores/{vector_store_id}/search | Search a vector store for relevant chunks based on a query and file attributes filter. |
+| *VectorStoresApi* | [**updateVectorStoreFileAttributes**](docs/VectorStoresApi.md#updatevectorstorefileattributes) | **POST** /vector_stores/{vector_store_id}/files/{file_id} | Update attributes on a vector store file. |
@@ -165,10 +181,10 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.AdminApiKeyOwner](docs/AdminApiKeyOwner.md)
- [com.openai.models.AdminApiKeysCreateRequest](docs/AdminApiKeysCreateRequest.md)
- [com.openai.models.AdminApiKeysDelete200Response](docs/AdminApiKeysDelete200Response.md)
+ - [com.openai.models.Annotation](docs/Annotation.md)
- [com.openai.models.ApiKeyList](docs/ApiKeyList.md)
- [com.openai.models.ArrayOfContentPartsInner](docs/ArrayOfContentPartsInner.md)
- [com.openai.models.AssistantObject](docs/AssistantObject.md)
- - [com.openai.models.AssistantObjectResponseFormat](docs/AssistantObjectResponseFormat.md)
- [com.openai.models.AssistantObjectToolResources](docs/AssistantObjectToolResources.md)
- [com.openai.models.AssistantObjectToolResourcesCodeInterpreter](docs/AssistantObjectToolResourcesCodeInterpreter.md)
- [com.openai.models.AssistantObjectToolResourcesFileSearch](docs/AssistantObjectToolResourcesFileSearch.md)
@@ -233,8 +249,12 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.BatchRequestOutput](docs/BatchRequestOutput.md)
- [com.openai.models.BatchRequestOutputError](docs/BatchRequestOutputError.md)
- [com.openai.models.BatchRequestOutputResponse](docs/BatchRequestOutputResponse.md)
+ - [com.openai.models.ChatCompletionDeleted](docs/ChatCompletionDeleted.md)
- [com.openai.models.ChatCompletionFunctionCallOption](docs/ChatCompletionFunctionCallOption.md)
- [com.openai.models.ChatCompletionFunctions](docs/ChatCompletionFunctions.md)
+ - [com.openai.models.ChatCompletionList](docs/ChatCompletionList.md)
+ - [com.openai.models.ChatCompletionMessageList](docs/ChatCompletionMessageList.md)
+ - [com.openai.models.ChatCompletionMessageListDataInner](docs/ChatCompletionMessageListDataInner.md)
- [com.openai.models.ChatCompletionMessageToolCall](docs/ChatCompletionMessageToolCall.md)
- [com.openai.models.ChatCompletionMessageToolCallChunk](docs/ChatCompletionMessageToolCallChunk.md)
- [com.openai.models.ChatCompletionMessageToolCallChunkFunction](docs/ChatCompletionMessageToolCallChunkFunction.md)
@@ -251,6 +271,8 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.ChatCompletionRequestMessage](docs/ChatCompletionRequestMessage.md)
- [com.openai.models.ChatCompletionRequestMessageContentPartAudio](docs/ChatCompletionRequestMessageContentPartAudio.md)
- [com.openai.models.ChatCompletionRequestMessageContentPartAudioInputAudio](docs/ChatCompletionRequestMessageContentPartAudioInputAudio.md)
+ - [com.openai.models.ChatCompletionRequestMessageContentPartFile](docs/ChatCompletionRequestMessageContentPartFile.md)
+ - [com.openai.models.ChatCompletionRequestMessageContentPartFileFile](docs/ChatCompletionRequestMessageContentPartFileFile.md)
- [com.openai.models.ChatCompletionRequestMessageContentPartImage](docs/ChatCompletionRequestMessageContentPartImage.md)
- [com.openai.models.ChatCompletionRequestMessageContentPartImageImageUrl](docs/ChatCompletionRequestMessageContentPartImageImageUrl.md)
- [com.openai.models.ChatCompletionRequestMessageContentPartRefusal](docs/ChatCompletionRequestMessageContentPartRefusal.md)
@@ -265,6 +287,8 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.ChatCompletionRequestUserMessageContent](docs/ChatCompletionRequestUserMessageContent.md)
- [com.openai.models.ChatCompletionRequestUserMessageContentPart](docs/ChatCompletionRequestUserMessageContentPart.md)
- [com.openai.models.ChatCompletionResponseMessage](docs/ChatCompletionResponseMessage.md)
+ - [com.openai.models.ChatCompletionResponseMessageAnnotationsInner](docs/ChatCompletionResponseMessageAnnotationsInner.md)
+ - [com.openai.models.ChatCompletionResponseMessageAnnotationsInnerUrlCitation](docs/ChatCompletionResponseMessageAnnotationsInnerUrlCitation.md)
- [com.openai.models.ChatCompletionResponseMessageAudio](docs/ChatCompletionResponseMessageAudio.md)
- [com.openai.models.ChatCompletionResponseMessageFunctionCall](docs/ChatCompletionResponseMessageFunctionCall.md)
- [com.openai.models.ChatCompletionRole](docs/ChatCompletionRole.md)
@@ -276,10 +300,30 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.ChatCompletionTool](docs/ChatCompletionTool.md)
- [com.openai.models.ChatCompletionToolChoiceOption](docs/ChatCompletionToolChoiceOption.md)
- [com.openai.models.ChunkingStrategyRequestParam](docs/ChunkingStrategyRequestParam.md)
+ - [com.openai.models.Click](docs/Click.md)
+ - [com.openai.models.CodeInterpreterFileOutput](docs/CodeInterpreterFileOutput.md)
+ - [com.openai.models.CodeInterpreterFileOutputFilesInner](docs/CodeInterpreterFileOutputFilesInner.md)
+ - [com.openai.models.CodeInterpreterTextOutput](docs/CodeInterpreterTextOutput.md)
+ - [com.openai.models.CodeInterpreterTool](docs/CodeInterpreterTool.md)
+ - [com.openai.models.CodeInterpreterToolCall](docs/CodeInterpreterToolCall.md)
+ - [com.openai.models.CodeInterpreterToolOutput](docs/CodeInterpreterToolOutput.md)
+ - [com.openai.models.ComparisonFilter](docs/ComparisonFilter.md)
+ - [com.openai.models.ComparisonFilterValue](docs/ComparisonFilterValue.md)
- [com.openai.models.CompleteUploadRequest](docs/CompleteUploadRequest.md)
- [com.openai.models.CompletionUsage](docs/CompletionUsage.md)
- [com.openai.models.CompletionUsageCompletionTokensDetails](docs/CompletionUsageCompletionTokensDetails.md)
- [com.openai.models.CompletionUsagePromptTokensDetails](docs/CompletionUsagePromptTokensDetails.md)
+ - [com.openai.models.CompoundFilter](docs/CompoundFilter.md)
+ - [com.openai.models.CompoundFilterFiltersInner](docs/CompoundFilterFiltersInner.md)
+ - [com.openai.models.ComputerAction](docs/ComputerAction.md)
+ - [com.openai.models.ComputerScreenshotImage](docs/ComputerScreenshotImage.md)
+ - [com.openai.models.ComputerTool](docs/ComputerTool.md)
+ - [com.openai.models.ComputerToolCall](docs/ComputerToolCall.md)
+ - [com.openai.models.ComputerToolCallOutput](docs/ComputerToolCallOutput.md)
+ - [com.openai.models.ComputerToolCallOutputResource](docs/ComputerToolCallOutputResource.md)
+ - [com.openai.models.ComputerToolCallSafetyCheck](docs/ComputerToolCallSafetyCheck.md)
+ - [com.openai.models.Content](docs/Content.md)
+ - [com.openai.models.Coordinate](docs/Coordinate.md)
- [com.openai.models.CostsResult](docs/CostsResult.md)
- [com.openai.models.CostsResultAmount](docs/CostsResultAmount.md)
- [com.openai.models.CreateAssistantRequest](docs/CreateAssistantRequest.md)
@@ -293,22 +337,18 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.CreateChatCompletionFunctionResponse](docs/CreateChatCompletionFunctionResponse.md)
- [com.openai.models.CreateChatCompletionFunctionResponseChoicesInner](docs/CreateChatCompletionFunctionResponseChoicesInner.md)
- [com.openai.models.CreateChatCompletionRequest](docs/CreateChatCompletionRequest.md)
- - [com.openai.models.CreateChatCompletionRequestAudio](docs/CreateChatCompletionRequestAudio.md)
- - [com.openai.models.CreateChatCompletionRequestFunctionCall](docs/CreateChatCompletionRequestFunctionCall.md)
- - [com.openai.models.CreateChatCompletionRequestModel](docs/CreateChatCompletionRequestModel.md)
- - [com.openai.models.CreateChatCompletionRequestPrediction](docs/CreateChatCompletionRequestPrediction.md)
- - [com.openai.models.CreateChatCompletionRequestResponseFormat](docs/CreateChatCompletionRequestResponseFormat.md)
- - [com.openai.models.CreateChatCompletionRequestStop](docs/CreateChatCompletionRequestStop.md)
+ - [com.openai.models.CreateChatCompletionRequestAllOfAudio](docs/CreateChatCompletionRequestAllOfAudio.md)
+ - [com.openai.models.CreateChatCompletionRequestAllOfFunctionCall](docs/CreateChatCompletionRequestAllOfFunctionCall.md)
+ - [com.openai.models.CreateChatCompletionRequestAllOfPrediction](docs/CreateChatCompletionRequestAllOfPrediction.md)
+ - [com.openai.models.CreateChatCompletionRequestAllOfResponseFormat](docs/CreateChatCompletionRequestAllOfResponseFormat.md)
- [com.openai.models.CreateChatCompletionResponse](docs/CreateChatCompletionResponse.md)
- [com.openai.models.CreateChatCompletionResponseChoicesInner](docs/CreateChatCompletionResponseChoicesInner.md)
- [com.openai.models.CreateChatCompletionResponseChoicesInnerLogprobs](docs/CreateChatCompletionResponseChoicesInnerLogprobs.md)
- [com.openai.models.CreateChatCompletionStreamResponse](docs/CreateChatCompletionStreamResponse.md)
- [com.openai.models.CreateChatCompletionStreamResponseChoicesInner](docs/CreateChatCompletionStreamResponseChoicesInner.md)
- - [com.openai.models.CreateChatCompletionStreamResponseUsage](docs/CreateChatCompletionStreamResponseUsage.md)
- [com.openai.models.CreateCompletionRequest](docs/CreateCompletionRequest.md)
- [com.openai.models.CreateCompletionRequestModel](docs/CreateCompletionRequestModel.md)
- [com.openai.models.CreateCompletionRequestPrompt](docs/CreateCompletionRequestPrompt.md)
- - [com.openai.models.CreateCompletionRequestStop](docs/CreateCompletionRequestStop.md)
- [com.openai.models.CreateCompletionResponse](docs/CreateCompletionResponse.md)
- [com.openai.models.CreateCompletionResponseChoicesInner](docs/CreateCompletionResponseChoicesInner.md)
- [com.openai.models.CreateCompletionResponseChoicesInnerLogprobs](docs/CreateCompletionResponseChoicesInnerLogprobs.md)
@@ -317,6 +357,7 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.CreateEmbeddingRequestModel](docs/CreateEmbeddingRequestModel.md)
- [com.openai.models.CreateEmbeddingResponse](docs/CreateEmbeddingResponse.md)
- [com.openai.models.CreateEmbeddingResponseUsage](docs/CreateEmbeddingResponseUsage.md)
+ - [com.openai.models.CreateFineTuningCheckpointPermissionRequest](docs/CreateFineTuningCheckpointPermissionRequest.md)
- [com.openai.models.CreateFineTuningJobRequest](docs/CreateFineTuningJobRequest.md)
- [com.openai.models.CreateFineTuningJobRequestHyperparameters](docs/CreateFineTuningJobRequestHyperparameters.md)
- [com.openai.models.CreateFineTuningJobRequestHyperparametersBatchSize](docs/CreateFineTuningJobRequestHyperparametersBatchSize.md)
@@ -333,6 +374,7 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.CreateMessageRequestAttachmentsInner](docs/CreateMessageRequestAttachmentsInner.md)
- [com.openai.models.CreateMessageRequestAttachmentsInnerToolsInner](docs/CreateMessageRequestAttachmentsInnerToolsInner.md)
- [com.openai.models.CreateMessageRequestContent](docs/CreateMessageRequestContent.md)
+ - [com.openai.models.CreateModelResponseProperties](docs/CreateModelResponseProperties.md)
- [com.openai.models.CreateModerationRequest](docs/CreateModerationRequest.md)
- [com.openai.models.CreateModerationRequestInput](docs/CreateModerationRequestInput.md)
- [com.openai.models.CreateModerationRequestInputOneOfInner](docs/CreateModerationRequestInputOneOfInner.md)
@@ -345,6 +387,8 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.CreateModerationResponseResultsInnerCategories](docs/CreateModerationResponseResultsInnerCategories.md)
- [com.openai.models.CreateModerationResponseResultsInnerCategoryAppliedInputTypes](docs/CreateModerationResponseResultsInnerCategoryAppliedInputTypes.md)
- [com.openai.models.CreateModerationResponseResultsInnerCategoryScores](docs/CreateModerationResponseResultsInnerCategoryScores.md)
+ - [com.openai.models.CreateResponse](docs/CreateResponse.md)
+ - [com.openai.models.CreateResponseAllOfInput](docs/CreateResponseAllOfInput.md)
- [com.openai.models.CreateRunRequest](docs/CreateRunRequest.md)
- [com.openai.models.CreateRunRequestModel](docs/CreateRunRequestModel.md)
- [com.openai.models.CreateRunRequestToolChoice](docs/CreateRunRequestToolChoice.md)
@@ -362,8 +406,10 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.CreateTranscription200Response](docs/CreateTranscription200Response.md)
- [com.openai.models.CreateTranscriptionRequestModel](docs/CreateTranscriptionRequestModel.md)
- [com.openai.models.CreateTranscriptionResponseJson](docs/CreateTranscriptionResponseJson.md)
+ - [com.openai.models.CreateTranscriptionResponseStreamEvent](docs/CreateTranscriptionResponseStreamEvent.md)
- [com.openai.models.CreateTranscriptionResponseVerboseJson](docs/CreateTranscriptionResponseVerboseJson.md)
- [com.openai.models.CreateTranslation200Response](docs/CreateTranslation200Response.md)
+ - [com.openai.models.CreateTranslationRequestModel](docs/CreateTranslationRequestModel.md)
- [com.openai.models.CreateTranslationResponseJson](docs/CreateTranslationResponseJson.md)
- [com.openai.models.CreateTranslationResponseVerboseJson](docs/CreateTranslationResponseVerboseJson.md)
- [com.openai.models.CreateUploadRequest](docs/CreateUploadRequest.md)
@@ -374,17 +420,30 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.DefaultProjectErrorResponse](docs/DefaultProjectErrorResponse.md)
- [com.openai.models.DeleteAssistantResponse](docs/DeleteAssistantResponse.md)
- [com.openai.models.DeleteFileResponse](docs/DeleteFileResponse.md)
+ - [com.openai.models.DeleteFineTuningCheckpointPermissionResponse](docs/DeleteFineTuningCheckpointPermissionResponse.md)
- [com.openai.models.DeleteMessageResponse](docs/DeleteMessageResponse.md)
- [com.openai.models.DeleteModelResponse](docs/DeleteModelResponse.md)
- [com.openai.models.DeleteThreadResponse](docs/DeleteThreadResponse.md)
- [com.openai.models.DeleteVectorStoreFileResponse](docs/DeleteVectorStoreFileResponse.md)
- [com.openai.models.DeleteVectorStoreResponse](docs/DeleteVectorStoreResponse.md)
- [com.openai.models.DoneEvent](docs/DoneEvent.md)
+ - [com.openai.models.DoubleClick](docs/DoubleClick.md)
+ - [com.openai.models.Drag](docs/Drag.md)
+ - [com.openai.models.EasyInputMessage](docs/EasyInputMessage.md)
+ - [com.openai.models.EasyInputMessageContent](docs/EasyInputMessageContent.md)
- [com.openai.models.Embedding](docs/Embedding.md)
- [com.openai.models.Error](docs/Error.md)
- [com.openai.models.ErrorEvent](docs/ErrorEvent.md)
- [com.openai.models.ErrorResponse](docs/ErrorResponse.md)
+ - [com.openai.models.FileCitation](docs/FileCitation.md)
+ - [com.openai.models.FilePath](docs/FilePath.md)
+ - [com.openai.models.FileSearchRanker](docs/FileSearchRanker.md)
- [com.openai.models.FileSearchRankingOptions](docs/FileSearchRankingOptions.md)
+ - [com.openai.models.FileSearchTool](docs/FileSearchTool.md)
+ - [com.openai.models.FileSearchToolCall](docs/FileSearchToolCall.md)
+ - [com.openai.models.FileSearchToolCallResultsInner](docs/FileSearchToolCallResultsInner.md)
+ - [com.openai.models.FileSearchToolFilters](docs/FileSearchToolFilters.md)
+ - [com.openai.models.FileSearchToolRankingOptions](docs/FileSearchToolRankingOptions.md)
- [com.openai.models.FineTuneChatCompletionRequestAssistantMessage](docs/FineTuneChatCompletionRequestAssistantMessage.md)
- [com.openai.models.FineTuneChatRequestInput](docs/FineTuneChatRequestInput.md)
- [com.openai.models.FineTuneChatRequestInputMessagesInner](docs/FineTuneChatRequestInputMessagesInner.md)
@@ -401,6 +460,7 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.FineTunePreferenceRequestInputPreferredCompletionInner](docs/FineTunePreferenceRequestInputPreferredCompletionInner.md)
- [com.openai.models.FineTuneSupervisedMethod](docs/FineTuneSupervisedMethod.md)
- [com.openai.models.FineTuneSupervisedMethodHyperparameters](docs/FineTuneSupervisedMethodHyperparameters.md)
+ - [com.openai.models.FineTuningCheckpointPermission](docs/FineTuningCheckpointPermission.md)
- [com.openai.models.FineTuningIntegration](docs/FineTuningIntegration.md)
- [com.openai.models.FineTuningJob](docs/FineTuningJob.md)
- [com.openai.models.FineTuningJobCheckpoint](docs/FineTuningJobCheckpoint.md)
@@ -410,19 +470,39 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.FineTuningJobHyperparameters](docs/FineTuningJobHyperparameters.md)
- [com.openai.models.FineTuningJobIntegrationsInner](docs/FineTuningJobIntegrationsInner.md)
- [com.openai.models.FunctionObject](docs/FunctionObject.md)
+ - [com.openai.models.FunctionTool](docs/FunctionTool.md)
+ - [com.openai.models.FunctionToolCall](docs/FunctionToolCall.md)
+ - [com.openai.models.FunctionToolCallOutput](docs/FunctionToolCallOutput.md)
+ - [com.openai.models.FunctionToolCallOutputResource](docs/FunctionToolCallOutputResource.md)
+ - [com.openai.models.FunctionToolCallResource](docs/FunctionToolCallResource.md)
- [com.openai.models.Image](docs/Image.md)
- [com.openai.models.ImagesResponse](docs/ImagesResponse.md)
+ - [com.openai.models.Includable](docs/Includable.md)
+ - [com.openai.models.InputAudio](docs/InputAudio.md)
+ - [com.openai.models.InputContent](docs/InputContent.md)
+ - [com.openai.models.InputFile](docs/InputFile.md)
+ - [com.openai.models.InputImage](docs/InputImage.md)
+ - [com.openai.models.InputItem](docs/InputItem.md)
+ - [com.openai.models.InputMessage](docs/InputMessage.md)
+ - [com.openai.models.InputMessageResource](docs/InputMessageResource.md)
+ - [com.openai.models.InputText](docs/InputText.md)
- [com.openai.models.Invite](docs/Invite.md)
- [com.openai.models.InviteDeleteResponse](docs/InviteDeleteResponse.md)
- [com.openai.models.InviteListResponse](docs/InviteListResponse.md)
- [com.openai.models.InviteProjectsInner](docs/InviteProjectsInner.md)
- [com.openai.models.InviteRequest](docs/InviteRequest.md)
- [com.openai.models.InviteRequestProjectsInner](docs/InviteRequestProjectsInner.md)
+ - [com.openai.models.Item](docs/Item.md)
+ - [com.openai.models.ItemReference](docs/ItemReference.md)
+ - [com.openai.models.ItemResource](docs/ItemResource.md)
+ - [com.openai.models.JSONSchema](docs/JSONSchema.md)
+ - [com.openai.models.KeyPress](docs/KeyPress.md)
- [com.openai.models.ListAssistantsResponse](docs/ListAssistantsResponse.md)
- [com.openai.models.ListAuditLogsEffectiveAtParameter](docs/ListAuditLogsEffectiveAtParameter.md)
- [com.openai.models.ListAuditLogsResponse](docs/ListAuditLogsResponse.md)
- [com.openai.models.ListBatchesResponse](docs/ListBatchesResponse.md)
- [com.openai.models.ListFilesResponse](docs/ListFilesResponse.md)
+ - [com.openai.models.ListFineTuningCheckpointPermissionResponse](docs/ListFineTuningCheckpointPermissionResponse.md)
- [com.openai.models.ListFineTuningJobCheckpointsResponse](docs/ListFineTuningJobCheckpointsResponse.md)
- [com.openai.models.ListFineTuningJobEventsResponse](docs/ListFineTuningJobEventsResponse.md)
- [com.openai.models.ListMessagesResponse](docs/ListMessagesResponse.md)
@@ -433,6 +513,8 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.ListThreadsResponse](docs/ListThreadsResponse.md)
- [com.openai.models.ListVectorStoreFilesResponse](docs/ListVectorStoreFilesResponse.md)
- [com.openai.models.ListVectorStoresResponse](docs/ListVectorStoresResponse.md)
+ - [com.openai.models.LogProb](docs/LogProb.md)
+ - [com.openai.models.LogProbProperties](docs/LogProbProperties.md)
- [com.openai.models.MessageContentImageFileObject](docs/MessageContentImageFileObject.md)
- [com.openai.models.MessageContentImageFileObjectImageFile](docs/MessageContentImageFileObjectImageFile.md)
- [com.openai.models.MessageContentImageUrlObject](docs/MessageContentImageUrlObject.md)
@@ -471,6 +553,10 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.MessageStreamEventOneOf3](docs/MessageStreamEventOneOf3.md)
- [com.openai.models.MessageStreamEventOneOf4](docs/MessageStreamEventOneOf4.md)
- [com.openai.models.Model](docs/Model.md)
+ - [com.openai.models.ModelIds](docs/ModelIds.md)
+ - [com.openai.models.ModelIdsResponses](docs/ModelIdsResponses.md)
+ - [com.openai.models.ModelIdsShared](docs/ModelIdsShared.md)
+ - [com.openai.models.ModelResponseProperties](docs/ModelResponseProperties.md)
- [com.openai.models.ModifyAssistantRequest](docs/ModifyAssistantRequest.md)
- [com.openai.models.ModifyAssistantRequestModel](docs/ModifyAssistantRequestModel.md)
- [com.openai.models.ModifyAssistantRequestToolResources](docs/ModifyAssistantRequestToolResources.md)
@@ -481,8 +567,14 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.ModifyThreadRequest](docs/ModifyThreadRequest.md)
- [com.openai.models.ModifyThreadRequestToolResources](docs/ModifyThreadRequestToolResources.md)
- [com.openai.models.ModifyThreadRequestToolResourcesFileSearch](docs/ModifyThreadRequestToolResourcesFileSearch.md)
+ - [com.openai.models.Move](docs/Move.md)
- [com.openai.models.OpenAIFile](docs/OpenAIFile.md)
- [com.openai.models.OtherChunkingStrategyResponseParam](docs/OtherChunkingStrategyResponseParam.md)
+ - [com.openai.models.OutputAudio](docs/OutputAudio.md)
+ - [com.openai.models.OutputContent](docs/OutputContent.md)
+ - [com.openai.models.OutputItem](docs/OutputItem.md)
+ - [com.openai.models.OutputMessage](docs/OutputMessage.md)
+ - [com.openai.models.OutputText](docs/OutputText.md)
- [com.openai.models.PredictionContent](docs/PredictionContent.md)
- [com.openai.models.PredictionContentContent](docs/PredictionContentContent.md)
- [com.openai.models.Project](docs/Project.md)
@@ -507,8 +599,10 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.ProjectUserDeleteResponse](docs/ProjectUserDeleteResponse.md)
- [com.openai.models.ProjectUserListResponse](docs/ProjectUserListResponse.md)
- [com.openai.models.ProjectUserUpdateRequest](docs/ProjectUserUpdateRequest.md)
+ - [com.openai.models.RealtimeClientEvent](docs/RealtimeClientEvent.md)
- [com.openai.models.RealtimeClientEventConversationItemCreate](docs/RealtimeClientEventConversationItemCreate.md)
- [com.openai.models.RealtimeClientEventConversationItemDelete](docs/RealtimeClientEventConversationItemDelete.md)
+ - [com.openai.models.RealtimeClientEventConversationItemRetrieve](docs/RealtimeClientEventConversationItemRetrieve.md)
- [com.openai.models.RealtimeClientEventConversationItemTruncate](docs/RealtimeClientEventConversationItemTruncate.md)
- [com.openai.models.RealtimeClientEventInputAudioBufferAppend](docs/RealtimeClientEventInputAudioBufferAppend.md)
- [com.openai.models.RealtimeClientEventInputAudioBufferClear](docs/RealtimeClientEventInputAudioBufferClear.md)
@@ -516,6 +610,7 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.RealtimeClientEventResponseCancel](docs/RealtimeClientEventResponseCancel.md)
- [com.openai.models.RealtimeClientEventResponseCreate](docs/RealtimeClientEventResponseCreate.md)
- [com.openai.models.RealtimeClientEventSessionUpdate](docs/RealtimeClientEventSessionUpdate.md)
+ - [com.openai.models.RealtimeClientEventTranscriptionSessionUpdate](docs/RealtimeClientEventTranscriptionSessionUpdate.md)
- [com.openai.models.RealtimeConversationItem](docs/RealtimeConversationItem.md)
- [com.openai.models.RealtimeConversationItemContentInner](docs/RealtimeConversationItemContentInner.md)
- [com.openai.models.RealtimeConversationItemWithReference](docs/RealtimeConversationItemWithReference.md)
@@ -530,13 +625,16 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.RealtimeResponseUsage](docs/RealtimeResponseUsage.md)
- [com.openai.models.RealtimeResponseUsageInputTokenDetails](docs/RealtimeResponseUsageInputTokenDetails.md)
- [com.openai.models.RealtimeResponseUsageOutputTokenDetails](docs/RealtimeResponseUsageOutputTokenDetails.md)
+ - [com.openai.models.RealtimeServerEvent](docs/RealtimeServerEvent.md)
- [com.openai.models.RealtimeServerEventConversationCreated](docs/RealtimeServerEventConversationCreated.md)
- [com.openai.models.RealtimeServerEventConversationCreatedConversation](docs/RealtimeServerEventConversationCreatedConversation.md)
- [com.openai.models.RealtimeServerEventConversationItemCreated](docs/RealtimeServerEventConversationItemCreated.md)
- [com.openai.models.RealtimeServerEventConversationItemDeleted](docs/RealtimeServerEventConversationItemDeleted.md)
- [com.openai.models.RealtimeServerEventConversationItemInputAudioTranscriptionCompleted](docs/RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.md)
+ - [com.openai.models.RealtimeServerEventConversationItemInputAudioTranscriptionDelta](docs/RealtimeServerEventConversationItemInputAudioTranscriptionDelta.md)
- [com.openai.models.RealtimeServerEventConversationItemInputAudioTranscriptionFailed](docs/RealtimeServerEventConversationItemInputAudioTranscriptionFailed.md)
- [com.openai.models.RealtimeServerEventConversationItemInputAudioTranscriptionFailedError](docs/RealtimeServerEventConversationItemInputAudioTranscriptionFailedError.md)
+ - [com.openai.models.RealtimeServerEventConversationItemRetrieved](docs/RealtimeServerEventConversationItemRetrieved.md)
- [com.openai.models.RealtimeServerEventConversationItemTruncated](docs/RealtimeServerEventConversationItemTruncated.md)
- [com.openai.models.RealtimeServerEventError](docs/RealtimeServerEventError.md)
- [com.openai.models.RealtimeServerEventErrorError](docs/RealtimeServerEventErrorError.md)
@@ -564,21 +662,74 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.RealtimeServerEventResponseTextDone](docs/RealtimeServerEventResponseTextDone.md)
- [com.openai.models.RealtimeServerEventSessionCreated](docs/RealtimeServerEventSessionCreated.md)
- [com.openai.models.RealtimeServerEventSessionUpdated](docs/RealtimeServerEventSessionUpdated.md)
+ - [com.openai.models.RealtimeServerEventTranscriptionSessionUpdated](docs/RealtimeServerEventTranscriptionSessionUpdated.md)
- [com.openai.models.RealtimeSession](docs/RealtimeSession.md)
- [com.openai.models.RealtimeSessionCreateRequest](docs/RealtimeSessionCreateRequest.md)
- - [com.openai.models.RealtimeSessionCreateRequestInputAudioTranscription](docs/RealtimeSessionCreateRequestInputAudioTranscription.md)
- - [com.openai.models.RealtimeSessionCreateRequestTurnDetection](docs/RealtimeSessionCreateRequestTurnDetection.md)
- [com.openai.models.RealtimeSessionCreateResponse](docs/RealtimeSessionCreateResponse.md)
- [com.openai.models.RealtimeSessionCreateResponseClientSecret](docs/RealtimeSessionCreateResponseClientSecret.md)
+ - [com.openai.models.RealtimeSessionCreateResponseInputAudioTranscription](docs/RealtimeSessionCreateResponseInputAudioTranscription.md)
- [com.openai.models.RealtimeSessionCreateResponseTurnDetection](docs/RealtimeSessionCreateResponseTurnDetection.md)
+ - [com.openai.models.RealtimeSessionInputAudioNoiseReduction](docs/RealtimeSessionInputAudioNoiseReduction.md)
- [com.openai.models.RealtimeSessionInputAudioTranscription](docs/RealtimeSessionInputAudioTranscription.md)
- - [com.openai.models.RealtimeSessionModel](docs/RealtimeSessionModel.md)
- [com.openai.models.RealtimeSessionTurnDetection](docs/RealtimeSessionTurnDetection.md)
+ - [com.openai.models.RealtimeTranscriptionSessionCreateRequest](docs/RealtimeTranscriptionSessionCreateRequest.md)
+ - [com.openai.models.RealtimeTranscriptionSessionCreateRequestInputAudioTranscription](docs/RealtimeTranscriptionSessionCreateRequestInputAudioTranscription.md)
+ - [com.openai.models.RealtimeTranscriptionSessionCreateRequestTurnDetection](docs/RealtimeTranscriptionSessionCreateRequestTurnDetection.md)
+ - [com.openai.models.RealtimeTranscriptionSessionCreateResponse](docs/RealtimeTranscriptionSessionCreateResponse.md)
+ - [com.openai.models.RealtimeTranscriptionSessionCreateResponseClientSecret](docs/RealtimeTranscriptionSessionCreateResponseClientSecret.md)
+ - [com.openai.models.RealtimeTranscriptionSessionCreateResponseInputAudioTranscription](docs/RealtimeTranscriptionSessionCreateResponseInputAudioTranscription.md)
+ - [com.openai.models.Reasoning](docs/Reasoning.md)
- [com.openai.models.ReasoningEffort](docs/ReasoningEffort.md)
+ - [com.openai.models.ReasoningItem](docs/ReasoningItem.md)
+ - [com.openai.models.ReasoningItemSummaryInner](docs/ReasoningItemSummaryInner.md)
+ - [com.openai.models.Refusal](docs/Refusal.md)
+ - [com.openai.models.Response](docs/Response.md)
+ - [com.openai.models.ResponseAllOfIncompleteDetails](docs/ResponseAllOfIncompleteDetails.md)
+ - [com.openai.models.ResponseAudioDeltaEvent](docs/ResponseAudioDeltaEvent.md)
+ - [com.openai.models.ResponseAudioDoneEvent](docs/ResponseAudioDoneEvent.md)
+ - [com.openai.models.ResponseAudioTranscriptDeltaEvent](docs/ResponseAudioTranscriptDeltaEvent.md)
+ - [com.openai.models.ResponseAudioTranscriptDoneEvent](docs/ResponseAudioTranscriptDoneEvent.md)
+ - [com.openai.models.ResponseCodeInterpreterCallCodeDeltaEvent](docs/ResponseCodeInterpreterCallCodeDeltaEvent.md)
+ - [com.openai.models.ResponseCodeInterpreterCallCodeDoneEvent](docs/ResponseCodeInterpreterCallCodeDoneEvent.md)
+ - [com.openai.models.ResponseCodeInterpreterCallCompletedEvent](docs/ResponseCodeInterpreterCallCompletedEvent.md)
+ - [com.openai.models.ResponseCodeInterpreterCallInProgressEvent](docs/ResponseCodeInterpreterCallInProgressEvent.md)
+ - [com.openai.models.ResponseCodeInterpreterCallInterpretingEvent](docs/ResponseCodeInterpreterCallInterpretingEvent.md)
+ - [com.openai.models.ResponseCompletedEvent](docs/ResponseCompletedEvent.md)
+ - [com.openai.models.ResponseContentPartAddedEvent](docs/ResponseContentPartAddedEvent.md)
+ - [com.openai.models.ResponseContentPartDoneEvent](docs/ResponseContentPartDoneEvent.md)
+ - [com.openai.models.ResponseCreatedEvent](docs/ResponseCreatedEvent.md)
+ - [com.openai.models.ResponseError](docs/ResponseError.md)
+ - [com.openai.models.ResponseErrorCode](docs/ResponseErrorCode.md)
+ - [com.openai.models.ResponseErrorEvent](docs/ResponseErrorEvent.md)
+ - [com.openai.models.ResponseFailedEvent](docs/ResponseFailedEvent.md)
+ - [com.openai.models.ResponseFileSearchCallCompletedEvent](docs/ResponseFileSearchCallCompletedEvent.md)
+ - [com.openai.models.ResponseFileSearchCallInProgressEvent](docs/ResponseFileSearchCallInProgressEvent.md)
+ - [com.openai.models.ResponseFileSearchCallSearchingEvent](docs/ResponseFileSearchCallSearchingEvent.md)
- [com.openai.models.ResponseFormatJsonObject](docs/ResponseFormatJsonObject.md)
- [com.openai.models.ResponseFormatJsonSchema](docs/ResponseFormatJsonSchema.md)
- - [com.openai.models.ResponseFormatJsonSchemaJsonSchema](docs/ResponseFormatJsonSchemaJsonSchema.md)
- [com.openai.models.ResponseFormatText](docs/ResponseFormatText.md)
+ - [com.openai.models.ResponseFunctionCallArgumentsDeltaEvent](docs/ResponseFunctionCallArgumentsDeltaEvent.md)
+ - [com.openai.models.ResponseFunctionCallArgumentsDoneEvent](docs/ResponseFunctionCallArgumentsDoneEvent.md)
+ - [com.openai.models.ResponseInProgressEvent](docs/ResponseInProgressEvent.md)
+ - [com.openai.models.ResponseIncompleteEvent](docs/ResponseIncompleteEvent.md)
+ - [com.openai.models.ResponseItemList](docs/ResponseItemList.md)
+ - [com.openai.models.ResponseOutputItemAddedEvent](docs/ResponseOutputItemAddedEvent.md)
+ - [com.openai.models.ResponseOutputItemDoneEvent](docs/ResponseOutputItemDoneEvent.md)
+ - [com.openai.models.ResponseProperties](docs/ResponseProperties.md)
+ - [com.openai.models.ResponsePropertiesText](docs/ResponsePropertiesText.md)
+ - [com.openai.models.ResponsePropertiesToolChoice](docs/ResponsePropertiesToolChoice.md)
+ - [com.openai.models.ResponseRefusalDeltaEvent](docs/ResponseRefusalDeltaEvent.md)
+ - [com.openai.models.ResponseRefusalDoneEvent](docs/ResponseRefusalDoneEvent.md)
+ - [com.openai.models.ResponseStreamEvent](docs/ResponseStreamEvent.md)
+ - [com.openai.models.ResponseTextAnnotationDeltaEvent](docs/ResponseTextAnnotationDeltaEvent.md)
+ - [com.openai.models.ResponseTextDeltaEvent](docs/ResponseTextDeltaEvent.md)
+ - [com.openai.models.ResponseTextDoneEvent](docs/ResponseTextDoneEvent.md)
+ - [com.openai.models.ResponseUsage](docs/ResponseUsage.md)
+ - [com.openai.models.ResponseUsageInputTokensDetails](docs/ResponseUsageInputTokensDetails.md)
+ - [com.openai.models.ResponseUsageOutputTokensDetails](docs/ResponseUsageOutputTokensDetails.md)
+ - [com.openai.models.ResponseWebSearchCallCompletedEvent](docs/ResponseWebSearchCallCompletedEvent.md)
+ - [com.openai.models.ResponseWebSearchCallInProgressEvent](docs/ResponseWebSearchCallInProgressEvent.md)
+ - [com.openai.models.ResponseWebSearchCallSearchingEvent](docs/ResponseWebSearchCallSearchingEvent.md)
- [com.openai.models.RunCompletionUsage](docs/RunCompletionUsage.md)
- [com.openai.models.RunObject](docs/RunObject.md)
- [com.openai.models.RunObjectIncompleteDetails](docs/RunObjectIncompleteDetails.md)
@@ -643,23 +794,39 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.RunStreamEventOneOf9](docs/RunStreamEventOneOf9.md)
- [com.openai.models.RunToolCallObject](docs/RunToolCallObject.md)
- [com.openai.models.RunToolCallObjectFunction](docs/RunToolCallObjectFunction.md)
+ - [com.openai.models.Screenshot](docs/Screenshot.md)
+ - [com.openai.models.Scroll](docs/Scroll.md)
- [com.openai.models.StaticChunkingStrategy](docs/StaticChunkingStrategy.md)
- [com.openai.models.StaticChunkingStrategyRequestParam](docs/StaticChunkingStrategyRequestParam.md)
- [com.openai.models.StaticChunkingStrategyResponseParam](docs/StaticChunkingStrategyResponseParam.md)
- [com.openai.models.StaticChunkingStrategyStatic](docs/StaticChunkingStrategyStatic.md)
+ - [com.openai.models.StopConfiguration](docs/StopConfiguration.md)
- [com.openai.models.SubmitToolOutputsRunRequest](docs/SubmitToolOutputsRunRequest.md)
- [com.openai.models.SubmitToolOutputsRunRequestToolOutputsInner](docs/SubmitToolOutputsRunRequestToolOutputsInner.md)
+ - [com.openai.models.TextResponseFormatConfiguration](docs/TextResponseFormatConfiguration.md)
+ - [com.openai.models.TextResponseFormatJsonSchema](docs/TextResponseFormatJsonSchema.md)
- [com.openai.models.ThreadObject](docs/ThreadObject.md)
- [com.openai.models.ThreadStreamEvent](docs/ThreadStreamEvent.md)
- [com.openai.models.ThreadStreamEventOneOf](docs/ThreadStreamEventOneOf.md)
+ - [com.openai.models.Tool](docs/Tool.md)
+ - [com.openai.models.ToolChoiceFunction](docs/ToolChoiceFunction.md)
+ - [com.openai.models.ToolChoiceOptions](docs/ToolChoiceOptions.md)
+ - [com.openai.models.ToolChoiceTypes](docs/ToolChoiceTypes.md)
+ - [com.openai.models.TranscriptTextDeltaEvent](docs/TranscriptTextDeltaEvent.md)
+ - [com.openai.models.TranscriptTextDoneEvent](docs/TranscriptTextDoneEvent.md)
+ - [com.openai.models.TranscriptionInclude](docs/TranscriptionInclude.md)
- [com.openai.models.TranscriptionSegment](docs/TranscriptionSegment.md)
- [com.openai.models.TranscriptionWord](docs/TranscriptionWord.md)
- [com.openai.models.TruncationObject](docs/TruncationObject.md)
+ - [com.openai.models.Type](docs/Type.md)
+ - [com.openai.models.UpdateChatCompletionRequest](docs/UpdateChatCompletionRequest.md)
+ - [com.openai.models.UpdateVectorStoreFileAttributesRequest](docs/UpdateVectorStoreFileAttributesRequest.md)
- [com.openai.models.UpdateVectorStoreRequest](docs/UpdateVectorStoreRequest.md)
- [com.openai.models.UpdateVectorStoreRequestExpiresAfter](docs/UpdateVectorStoreRequestExpiresAfter.md)
- [com.openai.models.Upload](docs/Upload.md)
- [com.openai.models.UploadFile](docs/UploadFile.md)
- [com.openai.models.UploadPart](docs/UploadPart.md)
+ - [com.openai.models.UrlCitation](docs/UrlCitation.md)
- [com.openai.models.UsageAudioSpeechesResult](docs/UsageAudioSpeechesResult.md)
- [com.openai.models.UsageAudioTranscriptionsResult](docs/UsageAudioTranscriptionsResult.md)
- [com.openai.models.UsageCodeInterpreterSessionsResult](docs/UsageCodeInterpreterSessionsResult.md)
@@ -676,13 +843,31 @@ All URIs are relative to *https://api.openai.com/v1*
- [com.openai.models.UserListResponse](docs/UserListResponse.md)
- [com.openai.models.UserRoleUpdateRequest](docs/UserRoleUpdateRequest.md)
- [com.openai.models.VectorStoreExpirationAfter](docs/VectorStoreExpirationAfter.md)
+ - [com.openai.models.VectorStoreFileAttributesValue](docs/VectorStoreFileAttributesValue.md)
- [com.openai.models.VectorStoreFileBatchObject](docs/VectorStoreFileBatchObject.md)
- [com.openai.models.VectorStoreFileBatchObjectFileCounts](docs/VectorStoreFileBatchObjectFileCounts.md)
+ - [com.openai.models.VectorStoreFileContentResponse](docs/VectorStoreFileContentResponse.md)
+ - [com.openai.models.VectorStoreFileContentResponseDataInner](docs/VectorStoreFileContentResponseDataInner.md)
- [com.openai.models.VectorStoreFileObject](docs/VectorStoreFileObject.md)
- [com.openai.models.VectorStoreFileObjectChunkingStrategy](docs/VectorStoreFileObjectChunkingStrategy.md)
- [com.openai.models.VectorStoreFileObjectLastError](docs/VectorStoreFileObjectLastError.md)
- [com.openai.models.VectorStoreObject](docs/VectorStoreObject.md)
- [com.openai.models.VectorStoreObjectFileCounts](docs/VectorStoreObjectFileCounts.md)
+ - [com.openai.models.VectorStoreSearchRequest](docs/VectorStoreSearchRequest.md)
+ - [com.openai.models.VectorStoreSearchRequestQuery](docs/VectorStoreSearchRequestQuery.md)
+ - [com.openai.models.VectorStoreSearchRequestRankingOptions](docs/VectorStoreSearchRequestRankingOptions.md)
+ - [com.openai.models.VectorStoreSearchResultContentObject](docs/VectorStoreSearchResultContentObject.md)
+ - [com.openai.models.VectorStoreSearchResultItem](docs/VectorStoreSearchResultItem.md)
+ - [com.openai.models.VectorStoreSearchResultsPage](docs/VectorStoreSearchResultsPage.md)
+ - [com.openai.models.VoiceIdsShared](docs/VoiceIdsShared.md)
+ - [com.openai.models.Wait](docs/Wait.md)
+ - [com.openai.models.WebSearch](docs/WebSearch.md)
+ - [com.openai.models.WebSearchContextSize](docs/WebSearchContextSize.md)
+ - [com.openai.models.WebSearchLocation](docs/WebSearchLocation.md)
+ - [com.openai.models.WebSearchTool](docs/WebSearchTool.md)
+ - [com.openai.models.WebSearchToolCall](docs/WebSearchToolCall.md)
+ - [com.openai.models.WebSearchToolUserLocation](docs/WebSearchToolUserLocation.md)
+ - [com.openai.models.WebSearchUserLocation](docs/WebSearchUserLocation.md)
diff --git a/lib/build.gradle.kts b/lib/build.gradle.kts
index 4adf4b57..b5ffa083 100644
--- a/lib/build.gradle.kts
+++ b/lib/build.gradle.kts
@@ -2,8 +2,8 @@ import org.jetbrains.kotlin.gradle.tasks.KotlinCompilationTask
plugins {
alias(libs.plugins.kotlin.jvm)
- id("com.diffplug.spotless") version "7.0.2"
// id("maven-publish")
+ alias(libs.plugins.spotless)
}
group = "com.openai"
@@ -32,6 +32,9 @@ dependencies {
testImplementation(libs.kotlintest.runner.junit5)
}
+// Use the spotless plugin to automatically format code, remove unused imports, etc.
+// To apply changes directly to the files, run `gradlew spotlessApply`
+// Ref: https://github.com/diffplug/spotless/tree/main/plugin-gradle
spotless {
kotlin {
ktfmt("0.54").googleStyle().configure {
diff --git a/lib/docs/Annotation.md b/lib/docs/Annotation.md
new file mode 100644
index 00000000..98edb50c
--- /dev/null
+++ b/lib/docs/Annotation.md
@@ -0,0 +1,23 @@
+
+# Annotation
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the file citation. Always `file_citation`. | |
+| **index** | **kotlin.Int** | The index of the file in the list of files. | |
+| **fileId** | **kotlin.String** | The ID of the file. | |
+| **url** | **kotlin.String** | The URL of the web resource. | |
+| **title** | **kotlin.String** | The title of the web resource. | |
+| **startIndex** | **kotlin.Int** | The index of the first character of the URL citation in the message. | |
+| **endIndex** | **kotlin.Int** | The index of the last character of the URL citation in the message. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file_citation, url_citation, file_path |
+
+
+
diff --git a/lib/docs/AssistantObject.md b/lib/docs/AssistantObject.md
index c462cc0f..ea87a4a4 100644
--- a/lib/docs/AssistantObject.md
+++ b/lib/docs/AssistantObject.md
@@ -16,7 +16,7 @@
| **toolResources** | [**AssistantObjectToolResources**](AssistantObjectToolResources.md) | | [optional] |
| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. | [optional] |
| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. | [optional] |
-| **responseFormat** | [**AssistantObjectResponseFormat**](AssistantObjectResponseFormat.md) | | [optional] |
+| **responseFormat** | [**AssistantsApiResponseFormatOption**](AssistantsApiResponseFormatOption.md) | | [optional] |
diff --git a/lib/docs/AssistantSupportedModels.md b/lib/docs/AssistantSupportedModels.md
index 97d02da4..f52bdb4d 100644
--- a/lib/docs/AssistantSupportedModels.md
+++ b/lib/docs/AssistantSupportedModels.md
@@ -24,6 +24,10 @@
* `gptMinus4oMinusMiniMinus2024Minus07Minus18` (value: `"gpt-4o-mini-2024-07-18"`)
+ * `gptMinus4Period5MinusPreview` (value: `"gpt-4.5-preview"`)
+
+ * `gptMinus4Period5MinusPreviewMinus2025Minus02Minus27` (value: `"gpt-4.5-preview-2025-02-27"`)
+
* `gptMinus4MinusTurbo` (value: `"gpt-4-turbo"`)
* `gptMinus4MinusTurboMinus2024Minus04Minus09` (value: `"gpt-4-turbo-2024-04-09"`)
diff --git a/lib/docs/AssistantsApiResponseFormatOption.md b/lib/docs/AssistantsApiResponseFormatOption.md
index 6c8b83fb..c033e43e 100644
--- a/lib/docs/AssistantsApiResponseFormatOption.md
+++ b/lib/docs/AssistantsApiResponseFormatOption.md
@@ -4,8 +4,8 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **type** | [**inline**](#Type) | The type of response format being defined: `text` | |
-| **jsonSchema** | [**ResponseFormatJsonSchemaJsonSchema**](ResponseFormatJsonSchemaJsonSchema.md) | | |
+| **type** | [**inline**](#Type) | The type of response format being defined. Always `text`. | |
+| **jsonSchema** | [**JSONSchema**](JSONSchema.md) | | |
diff --git a/lib/docs/AudioApi.md b/lib/docs/AudioApi.md
index 7ec8b1cc..83052caf 100644
--- a/lib/docs/AudioApi.md
+++ b/lib/docs/AudioApi.md
@@ -57,7 +57,7 @@ Configure ApiKeyAuth:
# **createTranscription**
-> CreateTranscription200Response createTranscription(file, model, language, prompt, responseFormat, temperature, timestampGranularities)
+> CreateTranscription200Response createTranscription(file, model, language, prompt, responseFormat, temperature, include, timestampGranularities, stream)
Transcribes audio into the input language.
@@ -74,9 +74,11 @@ val language : kotlin.String = language_example // kotlin.String | The language
val prompt : kotlin.String = prompt_example // kotlin.String | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language.
val responseFormat : AudioResponseFormat = // AudioResponseFormat |
val temperature : java.math.BigDecimal = 8.14 // java.math.BigDecimal | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+val include : kotlin.collections.List = // kotlin.collections.List | Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with response_format set to `json` and only with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`.
val timestampGranularities : kotlin.collections.List = // kotlin.collections.List | The timestamp granularities to populate for this transcription. `response_format` must be set to `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+val stream : kotlin.Boolean = true // kotlin.Boolean | If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). See the [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) for more information. Note: Streaming is not supported for the `whisper-1` model and will be ignored.
try {
- val result : CreateTranscription200Response = apiInstance.createTranscription(file, model, language, prompt, responseFormat, temperature, timestampGranularities)
+ val result : CreateTranscription200Response = apiInstance.createTranscription(file, model, language, prompt, responseFormat, temperature, include, timestampGranularities, stream)
println(result)
} catch (e: ClientException) {
println("4xx response calling AudioApi#createTranscription")
@@ -94,9 +96,11 @@ try {
| **prompt** | **kotlin.String**| An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. | [optional] |
| **responseFormat** | [**AudioResponseFormat**](AudioResponseFormat.md)| | [optional] [default to json] [enum: json, text, srt, verbose_json, vtt] |
| **temperature** | **java.math.BigDecimal**| The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. | [optional] [default to 0] |
+| **include** | [**kotlin.collections.List<TranscriptionInclude>**](TranscriptionInclude.md)| Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with response_format set to `json` and only with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`. | [optional] |
+| **timestampGranularities** | [**kotlin.collections.List&lt;kotlin.String&gt;**](kotlin.String.md)| The timestamp granularities to populate for this transcription. `response_format` must be set to `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. | [optional] [enum: word, segment] |
| Name | Type | Description | Notes |
| ------------- | ------------- | ------------- | ------------- |
-| **timestampGranularities** | [**kotlin.collections.List<kotlin.String>**](kotlin.String.md)| The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. | [optional] [enum: word, segment] |
+| **stream** | **kotlin.Boolean**| If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). See the [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) for more information. Note: Streaming is not supported for the `whisper-1` model and will be ignored. | [optional] [default to false] |
### Return type
@@ -127,9 +131,9 @@ Translates audio into English.
val apiInstance = AudioApi()
val file : java.io.File = BINARY_DATA_HERE // java.io.File | The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
-val model : CreateTranscriptionRequestModel = // CreateTranscriptionRequestModel |
+val model : CreateTranslationRequestModel = // CreateTranslationRequestModel |
val prompt : kotlin.String = prompt_example // kotlin.String | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English.
-val responseFormat : AudioResponseFormat = // AudioResponseFormat |
+val responseFormat : kotlin.String = responseFormat_example // kotlin.String | The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
val temperature : java.math.BigDecimal = 8.14 // java.math.BigDecimal | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
try {
val result : CreateTranslation200Response = apiInstance.createTranslation(file, model, prompt, responseFormat, temperature)
@@ -145,9 +149,9 @@ try {
### Parameters
| **file** | **java.io.File**| The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. | |
-| **model** | [**CreateTranscriptionRequestModel**](CreateTranscriptionRequestModel.md)| | |
+| **model** | [**CreateTranslationRequestModel**](CreateTranslationRequestModel.md)| | |
| **prompt** | **kotlin.String**| An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should be in English. | [optional] |
-| **responseFormat** | [**AudioResponseFormat**](AudioResponseFormat.md)| | [optional] [default to json] [enum: json, text, srt, verbose_json, vtt] |
+| **responseFormat** | **kotlin.String**| The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. | [optional] [default to json] [enum: json, text, srt, verbose_json, vtt] |
| Name | Type | Description | Notes |
| ------------- | ------------- | ------------- | ------------- |
| **temperature** | **java.math.BigDecimal**| The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. | [optional] [default to 0] |
diff --git a/lib/docs/ChatApi.md b/lib/docs/ChatApi.md
index ff0161cc..ef61be7b 100644
--- a/lib/docs/ChatApi.md
+++ b/lib/docs/ChatApi.md
@@ -4,14 +4,19 @@ All URIs are relative to *https://api.openai.com/v1*
| Method | HTTP request | Description |
| ------------- | ------------- | ------------- |
-| [**createChatCompletion**](ChatApi.md#createChatCompletion) | **POST** /chat/completions | Creates a model response for the given chat conversation. Learn more in the [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), and [audio](/docs/guides/audio) guides. Parameter support can differ depending on the model used to generate the response, particularly for newer reasoning models. Parameters that are only supported for reasoning models are noted below. For the current state of unsupported parameters in reasoning models, [refer to the reasoning guide](/docs/guides/reasoning). |
+| [**createChatCompletion**](ChatApi.md#createChatCompletion) | **POST** /chat/completions | **Starting a new project?** We recommend trying [Responses](/docs/api-reference/responses) to take advantage of the latest OpenAI platform features. Compare [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses). --- Creates a model response for the given chat conversation. Learn more in the [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), and [audio](/docs/guides/audio) guides. Parameter support can differ depending on the model used to generate the response, particularly for newer reasoning models. Parameters that are only supported for reasoning models are noted below. For the current state of unsupported parameters in reasoning models, [refer to the reasoning guide](/docs/guides/reasoning). |
+| [**deleteChatCompletion**](ChatApi.md#deleteChatCompletion) | **DELETE** /chat/completions/{completion_id} | Delete a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` can be deleted. |
+| [**getChatCompletion**](ChatApi.md#getChatCompletion) | **GET** /chat/completions/{completion_id} | Get a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` will be returned. |
+| [**getChatCompletionMessages**](ChatApi.md#getChatCompletionMessages) | **GET** /chat/completions/{completion_id}/messages | Get the messages in a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` will be returned. |
+| [**listChatCompletions**](ChatApi.md#listChatCompletions) | **GET** /chat/completions | List stored Chat Completions. Only Chat Completions that have been stored with the `store` parameter set to `true` will be returned. |
+| [**updateChatCompletion**](ChatApi.md#updateChatCompletion) | **POST** /chat/completions/{completion_id} | Modify a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` can be modified. Currently, the only supported modification is to update the `metadata` field. |
# **createChatCompletion**
> CreateChatCompletionResponse createChatCompletion(createChatCompletionRequest)
-Creates a model response for the given chat conversation. Learn more in the [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), and [audio](/docs/guides/audio) guides. Parameter support can differ depending on the model used to generate the response, particularly for newer reasoning models. Parameters that are only supported for reasoning models are noted below. For the current state of unsupported parameters in reasoning models, [refer to the reasoning guide](/docs/guides/reasoning).
+**Starting a new project?** We recommend trying [Responses](/docs/api-reference/responses) to take advantage of the latest OpenAI platform features. Compare [Chat Completions with Responses](/docs/guides/responses-vs-chat-completions?api-mode=responses). --- Creates a model response for the given chat conversation. Learn more in the [text generation](/docs/guides/text-generation), [vision](/docs/guides/vision), and [audio](/docs/guides/audio) guides. Parameter support can differ depending on the model used to generate the response, particularly for newer reasoning models. Parameters that are only supported for reasoning models are noted below. For the current state of unsupported parameters in reasoning models, [refer to the reasoning guide](/docs/guides/reasoning).
### Example
```kotlin
@@ -45,6 +50,252 @@ try {
### Authorization
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+
+# **deleteChatCompletion**
+> ChatCompletionDeleted deleteChatCompletion(completionId)
+
+Delete a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` can be deleted.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ChatApi()
+val completionId : kotlin.String = completionId_example // kotlin.String | The ID of the chat completion to delete.
+try {
+ val result : ChatCompletionDeleted = apiInstance.deleteChatCompletion(completionId)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling ChatApi#deleteChatCompletion")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ChatApi#deleteChatCompletion")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **completionId** | **kotlin.String**| The ID of the chat completion to delete. | |
+
+### Return type
+
+[**ChatCompletionDeleted**](ChatCompletionDeleted.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+
+# **getChatCompletion**
+> CreateChatCompletionResponse getChatCompletion(completionId)
+
+Get a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` will be returned.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ChatApi()
+val completionId : kotlin.String = completionId_example // kotlin.String | The ID of the chat completion to retrieve.
+try {
+ val result : CreateChatCompletionResponse = apiInstance.getChatCompletion(completionId)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling ChatApi#getChatCompletion")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ChatApi#getChatCompletion")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **completionId** | **kotlin.String**| The ID of the chat completion to retrieve. | |
+
+### Return type
+
+[**CreateChatCompletionResponse**](CreateChatCompletionResponse.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+
+# **getChatCompletionMessages**
+> ChatCompletionMessageList getChatCompletionMessages(completionId, after, limit, order)
+
+Get the messages in a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` will be returned.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ChatApi()
+val completionId : kotlin.String = completionId_example // kotlin.String | The ID of the chat completion to retrieve messages from.
+val after : kotlin.String = after_example // kotlin.String | Identifier for the last message from the previous pagination request.
+val limit : kotlin.Int = 56 // kotlin.Int | Number of messages to retrieve.
+val order : kotlin.String = order_example // kotlin.String | Sort order for messages by timestamp. Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
+try {
+ val result : ChatCompletionMessageList = apiInstance.getChatCompletionMessages(completionId, after, limit, order)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling ChatApi#getChatCompletionMessages")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ChatApi#getChatCompletionMessages")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **completionId** | **kotlin.String**| The ID of the chat completion to retrieve messages from. | |
+| **after** | **kotlin.String**| Identifier for the last message from the previous pagination request. | [optional] |
+| **limit** | **kotlin.Int**| Number of messages to retrieve. | [optional] [default to 20] |
+| **order** | **kotlin.String**| Sort order for messages by timestamp. Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. | [optional] [default to asc] [enum: asc, desc] |
+
+### Return type
+
+[**ChatCompletionMessageList**](ChatCompletionMessageList.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+
+# **listChatCompletions**
+> ChatCompletionList listChatCompletions(model, metadata, after, limit, order)
+
+List stored Chat Completions. Only Chat Completions that have been stored with the `store` parameter set to `true` will be returned.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ChatApi()
+val model : kotlin.String = model_example // kotlin.String | The model used to generate the Chat Completions.
+val metadata : kotlin.collections.Map = // kotlin.collections.Map | A list of metadata keys to filter the Chat Completions by. Example: `metadata[key1]=value1&metadata[key2]=value2`
+val after : kotlin.String = after_example // kotlin.String | Identifier for the last chat completion from the previous pagination request.
+val limit : kotlin.Int = 56 // kotlin.Int | Number of Chat Completions to retrieve.
+val order : kotlin.String = order_example // kotlin.String | Sort order for Chat Completions by timestamp. Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`.
+try {
+ val result : ChatCompletionList = apiInstance.listChatCompletions(model, metadata, after, limit, order)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling ChatApi#listChatCompletions")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ChatApi#listChatCompletions")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **model** | **kotlin.String**| The model used to generate the Chat Completions. | [optional] |
+| **metadata** | [**kotlin.collections.Map<kotlin.String, kotlin.String>**](kotlin.String.md)| A list of metadata keys to filter the Chat Completions by. Example: `metadata[key1]=value1&metadata[key2]=value2` | [optional] |
+| **after** | **kotlin.String**| Identifier for the last chat completion from the previous pagination request. | [optional] |
+| **limit** | **kotlin.Int**| Number of Chat Completions to retrieve. | [optional] [default to 20] |
+| **order** | **kotlin.String**| Sort order for Chat Completions by timestamp. Use `asc` for ascending order or `desc` for descending order. Defaults to `asc`. | [optional] [default to asc] [enum: asc, desc] |
+
+### Return type
+
+[**ChatCompletionList**](ChatCompletionList.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+
+# **updateChatCompletion**
+> CreateChatCompletionResponse updateChatCompletion(completionId, updateChatCompletionRequest)
+
+Modify a stored chat completion. Only Chat Completions that have been created with the `store` parameter set to `true` can be modified. Currently, the only supported modification is to update the `metadata` field.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ChatApi()
+val completionId : kotlin.String = completionId_example // kotlin.String | The ID of the chat completion to update.
+val updateChatCompletionRequest : UpdateChatCompletionRequest = // UpdateChatCompletionRequest |
+try {
+ val result : CreateChatCompletionResponse = apiInstance.updateChatCompletion(completionId, updateChatCompletionRequest)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling ChatApi#updateChatCompletion")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ChatApi#updateChatCompletion")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **completionId** | **kotlin.String**| The ID of the chat completion to update. | |
+| **updateChatCompletionRequest** | [**UpdateChatCompletionRequest**](UpdateChatCompletionRequest.md)| | |
+
+### Return type
+
+[**CreateChatCompletionResponse**](CreateChatCompletionResponse.md)
+
+### Authorization
+
+
Configure ApiKeyAuth:
ApiClient.accessToken = ""
diff --git a/lib/docs/ChatCompletionDeleted.md b/lib/docs/ChatCompletionDeleted.md
new file mode 100644
index 00000000..a2f164a2
--- /dev/null
+++ b/lib/docs/ChatCompletionDeleted.md
@@ -0,0 +1,19 @@
+
+# ChatCompletionDeleted
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **`object`** | [**inline**](#`Object`) | The type of object being deleted. | |
+| **id** | **kotlin.String** | The ID of the chat completion that was deleted. | |
+| **deleted** | **kotlin.Boolean** | Whether the chat completion was deleted. | |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | chat.completion.deleted |
+
+
+
diff --git a/lib/docs/ChatCompletionList.md b/lib/docs/ChatCompletionList.md
new file mode 100644
index 00000000..475b8893
--- /dev/null
+++ b/lib/docs/ChatCompletionList.md
@@ -0,0 +1,21 @@
+
+# ChatCompletionList
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **`object`** | [**inline**](#`Object`) | The type of this object. It is always set to \"list\". | |
+| **`data`** | [**kotlin.collections.List<CreateChatCompletionResponse>**](CreateChatCompletionResponse.md) | An array of chat completion objects. | |
+| **firstId** | **kotlin.String** | The identifier of the first chat completion in the data array. | |
+| **lastId** | **kotlin.String** | The identifier of the last chat completion in the data array. | |
+| **hasMore** | **kotlin.Boolean** | Indicates whether there are more Chat Completions available. | |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | list |
+
+
+
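
`firstId`, `lastId`, and `hasMore` are the cursor-pagination fields that pair with the new `listChatCompletions` operation above. A hedged sketch of walking every stored completion (signature per the `ChatApi` doc; passing `null` for unused optional parameters is an assumption about the generated method defaults):

```kotlin
//import com.openai.infrastructure.*
//import com.openai.models.*

val apiInstance = ChatApi()
var after: kotlin.String? = null
do {
    // model and metadata filters left unset; 20 items per page, ascending by timestamp
    val page: ChatCompletionList = apiInstance.listChatCompletions(null, null, after, 20, "asc")
    page.`data`.forEach { completion -> println(completion) }
    after = page.lastId
} while (page.hasMore)
```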
diff --git a/lib/docs/ChatCompletionMessageList.md b/lib/docs/ChatCompletionMessageList.md
new file mode 100644
index 00000000..1cf6a2d1
--- /dev/null
+++ b/lib/docs/ChatCompletionMessageList.md
@@ -0,0 +1,21 @@
+
+# ChatCompletionMessageList
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **`object`** | [**inline**](#`Object`) | The type of this object. It is always set to \"list\". | |
+| **`data`** | [**kotlin.collections.List<ChatCompletionMessageListDataInner>**](ChatCompletionMessageListDataInner.md) | An array of chat completion message objects. | |
+| **firstId** | **kotlin.String** | The identifier of the first chat message in the data array. | |
+| **lastId** | **kotlin.String** | The identifier of the last chat message in the data array. | |
+| **hasMore** | **kotlin.Boolean** | Indicates whether there are more chat messages available. | |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | list |
+
+
+
diff --git a/lib/docs/ChatCompletionMessageListDataInner.md b/lib/docs/ChatCompletionMessageListDataInner.md
new file mode 100644
index 00000000..c82b1f1d
--- /dev/null
+++ b/lib/docs/ChatCompletionMessageListDataInner.md
@@ -0,0 +1,24 @@
+
+# ChatCompletionMessageListDataInner
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **content** | **kotlin.String** | The contents of the message. | |
+| **refusal** | **kotlin.String** | The refusal message generated by the model. | |
+| **role** | [**inline**](#Role) | The role of the author of this message. | |
+| **id** | **kotlin.String** | The identifier of the chat message. | |
+| **toolCalls** | [**kotlin.collections.List<ChatCompletionMessageToolCall>**](ChatCompletionMessageToolCall.md) | The tool calls generated by the model, such as function calls. | [optional] |
+| **annotations** | [**kotlin.collections.List<ChatCompletionResponseMessageAnnotationsInner>**](ChatCompletionResponseMessageAnnotationsInner.md) | Annotations for the message, when applicable, as when using the [web search tool](/docs/guides/tools-web-search?api-mode=chat). | [optional] |
+| **functionCall** | [**ChatCompletionResponseMessageFunctionCall**](ChatCompletionResponseMessageFunctionCall.md) | | [optional] |
+| **audio** | [**ChatCompletionResponseMessageAudio**](ChatCompletionResponseMessageAudio.md) | | [optional] |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | assistant |
+
+
+
diff --git a/lib/docs/ChatCompletionRequestMessageContentPartFile.md b/lib/docs/ChatCompletionRequestMessageContentPartFile.md
new file mode 100644
index 00000000..95bbf22e
--- /dev/null
+++ b/lib/docs/ChatCompletionRequestMessageContentPartFile.md
@@ -0,0 +1,18 @@
+
+# ChatCompletionRequestMessageContentPartFile
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the content part. Always `file`. | |
+| **file** | [**ChatCompletionRequestMessageContentPartFileFile**](ChatCompletionRequestMessageContentPartFileFile.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file |
+
+
+
diff --git a/lib/docs/ChatCompletionRequestMessageContentPartFileFile.md b/lib/docs/ChatCompletionRequestMessageContentPartFileFile.md
new file mode 100644
index 00000000..63a8ed41
--- /dev/null
+++ b/lib/docs/ChatCompletionRequestMessageContentPartFileFile.md
@@ -0,0 +1,12 @@
+
+# ChatCompletionRequestMessageContentPartFileFile
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **filename** | **kotlin.String** | The name of the file, used when passing the file to the model as a string. | [optional] |
+| **fileData** | **kotlin.String** | The base64 encoded file data, used when passing the file to the model as a string. | [optional] |
+| **fileId** | **kotlin.String** | The ID of an uploaded file to use as input. | [optional] |
+
+
+
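
The new `file` content part carries either a previously uploaded file (`fileId`) or inline base64 bytes (`fileData` plus `filename`). A hedged construction sketch; the constructor shapes and the `Type.file` constant are assumptions, and only the class and property names come from the docs above:

```kotlin
//import com.openai.models.*
import java.io.File
import java.util.Base64

// Hypothetical: reference an already-uploaded file by ID (the ID value is a placeholder)...
val byId = ChatCompletionRequestMessageContentPartFileFile(fileId = "file-abc123")

// ...or inline the bytes as base64, per the filename/fileData fields documented above.
val inline = ChatCompletionRequestMessageContentPartFileFile(
    filename = "report.pdf",
    fileData = Base64.getEncoder().encodeToString(File("report.pdf").readBytes()),
)

// Wrap it as a user-message content part; the enum constant name is assumed.
val part = ChatCompletionRequestMessageContentPartFile(
    type = ChatCompletionRequestMessageContentPartFile.Type.file,
    file = byId,
)
```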
diff --git a/lib/docs/ChatCompletionRequestUserMessageContentPart.md b/lib/docs/ChatCompletionRequestUserMessageContentPart.md
index 9b88d210..0ca98785 100644
--- a/lib/docs/ChatCompletionRequestUserMessageContentPart.md
+++ b/lib/docs/ChatCompletionRequestUserMessageContentPart.md
@@ -8,13 +8,14 @@
| **text** | **kotlin.String** | The text content. | |
| **imageUrl** | [**ChatCompletionRequestMessageContentPartImageImageUrl**](ChatCompletionRequestMessageContentPartImageImageUrl.md) | | |
| **inputAudio** | [**ChatCompletionRequestMessageContentPartAudioInputAudio**](ChatCompletionRequestMessageContentPartAudioInputAudio.md) | | |
+| **file** | [**ChatCompletionRequestMessageContentPartFileFile**](ChatCompletionRequestMessageContentPartFileFile.md) | | |
## Enum: type
| Name | Value |
| ---- | ----- |
-| type | text, image_url, input_audio |
+| type | text, image_url, input_audio, file |
diff --git a/lib/docs/ChatCompletionResponseMessage.md b/lib/docs/ChatCompletionResponseMessage.md
index 57dd0b62..91b7d6d3 100644
--- a/lib/docs/ChatCompletionResponseMessage.md
+++ b/lib/docs/ChatCompletionResponseMessage.md
@@ -8,6 +8,7 @@
| **refusal** | **kotlin.String** | The refusal message generated by the model. | |
| **role** | [**inline**](#Role) | The role of the author of this message. | |
| **toolCalls** | [**kotlin.collections.List<ChatCompletionMessageToolCall>**](ChatCompletionMessageToolCall.md) | The tool calls generated by the model, such as function calls. | [optional] |
+| **annotations** | [**kotlin.collections.List<ChatCompletionResponseMessageAnnotationsInner>**](ChatCompletionResponseMessageAnnotationsInner.md) | Annotations for the message, when applicable, as when using the [web search tool](/docs/guides/tools-web-search?api-mode=chat). | [optional] |
| **functionCall** | [**ChatCompletionResponseMessageFunctionCall**](ChatCompletionResponseMessageFunctionCall.md) | | [optional] |
| **audio** | [**ChatCompletionResponseMessageAudio**](ChatCompletionResponseMessageAudio.md) | | [optional] |
diff --git a/lib/docs/ChatCompletionResponseMessageAnnotationsInner.md b/lib/docs/ChatCompletionResponseMessageAnnotationsInner.md
new file mode 100644
index 00000000..aecec8f8
--- /dev/null
+++ b/lib/docs/ChatCompletionResponseMessageAnnotationsInner.md
@@ -0,0 +1,18 @@
+
+# ChatCompletionResponseMessageAnnotationsInner
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the URL citation. Always `url_citation`. | |
+| **urlCitation** | [**ChatCompletionResponseMessageAnnotationsInnerUrlCitation**](ChatCompletionResponseMessageAnnotationsInnerUrlCitation.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | url_citation |
+
+
+
diff --git a/lib/docs/ChatCompletionResponseMessageAnnotationsInnerUrlCitation.md b/lib/docs/ChatCompletionResponseMessageAnnotationsInnerUrlCitation.md
new file mode 100644
index 00000000..7440da74
--- /dev/null
+++ b/lib/docs/ChatCompletionResponseMessageAnnotationsInnerUrlCitation.md
@@ -0,0 +1,13 @@
+
+# ChatCompletionResponseMessageAnnotationsInnerUrlCitation
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **endIndex** | **kotlin.Int** | The index of the last character of the URL citation in the message. | |
+| **startIndex** | **kotlin.Int** | The index of the first character of the URL citation in the message. | |
+| **url** | **kotlin.String** | The URL of the web resource. | |
+| **title** | **kotlin.String** | The title of the web resource. | |
+
+
+
diff --git a/lib/docs/ChatCompletionStreamOptions.md b/lib/docs/ChatCompletionStreamOptions.md
index 57cadef5..fc3fc5c6 100644
--- a/lib/docs/ChatCompletionStreamOptions.md
+++ b/lib/docs/ChatCompletionStreamOptions.md
@@ -4,7 +4,7 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **includeUsage** | **kotlin.Boolean** | If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. | [optional] |
+| **includeUsage** | **kotlin.Boolean** | If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. **NOTE:** If the stream is interrupted, you may not receive the final usage chunk which contains the total token usage for the request. | [optional] |
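
In practice `includeUsage` is enabled so that one final, `choices`-empty chunk reporting total token usage arrives just before `data: [DONE]`; per the new note above, an interrupted stream may never deliver it. A hedged sketch of the options object (constructor shape assumed):

```kotlin
//import com.openai.models.*

// Hypothetical: request the trailing usage chunk on a streamed chat completion.
val streamOptions = ChatCompletionStreamOptions(includeUsage = true)
// Every earlier chunk carries `usage = null`; only the final chunk has real totals,
// and it is not guaranteed to arrive if the stream is cut off early.
```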
diff --git a/lib/docs/Click.md b/lib/docs/Click.md
new file mode 100644
index 00000000..d04f5212
--- /dev/null
+++ b/lib/docs/Click.md
@@ -0,0 +1,27 @@
+
+# Click
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a click action, this property is always set to `click`. | |
+| **button** | [**inline**](#Button) | Indicates which mouse button was pressed during the click. One of `left`, `right`, `wheel`, `back`, or `forward`. | |
+| **x** | **kotlin.Int** | The x-coordinate where the click occurred. | |
+| **y** | **kotlin.Int** | The y-coordinate where the click occurred. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | click |
+
+
+
+## Enum: button
+| Name | Value |
+| ---- | ----- |
+| button | left, right, wheel, back, forward |
+
+
+
diff --git a/lib/docs/CodeInterpreterFileOutput.md b/lib/docs/CodeInterpreterFileOutput.md
new file mode 100644
index 00000000..d8a5eb51
--- /dev/null
+++ b/lib/docs/CodeInterpreterFileOutput.md
@@ -0,0 +1,18 @@
+
+# CodeInterpreterFileOutput
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the code interpreter file output. Always `files`. | |
+| **files** | [**kotlin.collections.List<CodeInterpreterFileOutputFilesInner>**](CodeInterpreterFileOutputFilesInner.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | files |
+
+
+
diff --git a/lib/docs/CodeInterpreterFileOutputFilesInner.md b/lib/docs/CodeInterpreterFileOutputFilesInner.md
new file mode 100644
index 00000000..f6f485ea
--- /dev/null
+++ b/lib/docs/CodeInterpreterFileOutputFilesInner.md
@@ -0,0 +1,11 @@
+
+# CodeInterpreterFileOutputFilesInner
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **mimeType** | **kotlin.String** | The MIME type of the file. | |
+| **fileId** | **kotlin.String** | The ID of the file. | |
+
+
+
diff --git a/lib/docs/CodeInterpreterTextOutput.md b/lib/docs/CodeInterpreterTextOutput.md
new file mode 100644
index 00000000..ea025973
--- /dev/null
+++ b/lib/docs/CodeInterpreterTextOutput.md
@@ -0,0 +1,18 @@
+
+# CodeInterpreterTextOutput
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the code interpreter text output. Always `logs`. | |
+| **logs** | **kotlin.String** | The logs of the code interpreter tool call. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | logs |
+
+
+
diff --git a/lib/docs/CodeInterpreterTool.md b/lib/docs/CodeInterpreterTool.md
new file mode 100644
index 00000000..600ae8da
--- /dev/null
+++ b/lib/docs/CodeInterpreterTool.md
@@ -0,0 +1,18 @@
+
+# CodeInterpreterTool
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the code interpreter tool. Always `code_interpreter`. | |
+| **fileIds** | **kotlin.collections.List<kotlin.String>** | The IDs of the files to run the code on. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | code_interpreter |
+
+
+
diff --git a/lib/docs/CodeInterpreterToolCall.md b/lib/docs/CodeInterpreterToolCall.md
new file mode 100644
index 00000000..b3354c2c
--- /dev/null
+++ b/lib/docs/CodeInterpreterToolCall.md
@@ -0,0 +1,28 @@
+
+# CodeInterpreterToolCall
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The unique ID of the code interpreter tool call. | |
+| **type** | [**inline**](#Type) | The type of the code interpreter tool call. Always `code_interpreter_call`. | |
+| **code** | **kotlin.String** | The code to run. | |
+| **status** | [**inline**](#Status) | The status of the code interpreter tool call. | |
+| **results** | [**kotlin.collections.List<CodeInterpreterToolOutput>**](CodeInterpreterToolOutput.md) | The results of the code interpreter tool call. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | code_interpreter_call |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, interpreting, completed |
+
+
+
diff --git a/lib/docs/CodeInterpreterToolOutput.md b/lib/docs/CodeInterpreterToolOutput.md
new file mode 100644
index 00000000..e58959fd
--- /dev/null
+++ b/lib/docs/CodeInterpreterToolOutput.md
@@ -0,0 +1,19 @@
+
+# CodeInterpreterToolOutput
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the code interpreter text output. Always `logs`. | |
+| **logs** | **kotlin.String** | The logs of the code interpreter tool call. | |
+| **files** | [**kotlin.collections.List<CodeInterpreterFileOutputFilesInner>**](CodeInterpreterFileOutputFilesInner.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | logs, files |
+
+
+
diff --git a/lib/docs/ComparisonFilter.md b/lib/docs/ComparisonFilter.md
new file mode 100644
index 00000000..b3163a58
--- /dev/null
+++ b/lib/docs/ComparisonFilter.md
@@ -0,0 +1,19 @@
+
+# ComparisonFilter
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. - `eq`: equals - `ne`: not equal - `gt`: greater than - `gte`: greater than or equal - `lt`: less than - `lte`: less than or equal | |
+| **key** | **kotlin.String** | The key to compare against the value. | |
+| **`value`** | [**ComparisonFilterValue**](ComparisonFilterValue.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | eq, ne, gt, gte, lt, lte |
+
+
+
diff --git a/lib/docs/CreateCompletionRequestStop.md b/lib/docs/ComparisonFilterValue.md
similarity index 80%
rename from lib/docs/CreateCompletionRequestStop.md
rename to lib/docs/ComparisonFilterValue.md
index 4637d76c..7dc0166f 100644
--- a/lib/docs/CreateCompletionRequestStop.md
+++ b/lib/docs/ComparisonFilterValue.md
@@ -1,5 +1,5 @@
-# CreateCompletionRequestStop
+# ComparisonFilterValue
## Properties
| Name | Type | Description | Notes |
diff --git a/lib/docs/CompoundFilter.md b/lib/docs/CompoundFilter.md
new file mode 100644
index 00000000..217db8db
--- /dev/null
+++ b/lib/docs/CompoundFilter.md
@@ -0,0 +1,18 @@
+
+# CompoundFilter
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Type of operation: `and` or `or`. | |
+| **filters** | [**kotlin.collections.List<CompoundFilterFiltersInner>**](CompoundFilterFiltersInner.md) | Array of filters to combine. Items can be `ComparisonFilter` or `CompoundFilter`. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | and, or |
+
+
+
diff --git a/lib/docs/CompoundFilterFiltersInner.md b/lib/docs/CompoundFilterFiltersInner.md
new file mode 100644
index 00000000..c4a27227
--- /dev/null
+++ b/lib/docs/CompoundFilterFiltersInner.md
@@ -0,0 +1,19 @@
+
+# CompoundFilterFiltersInner
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. - `eq`: equals - `ne`: not equal - `gt`: greater than - `gte`: greater than or equal - `lt`: less than - `lte`: less than or equal | |
+| **key** | **kotlin.String** | The key to compare against the value. | |
+| **`value`** | [**ComparisonFilterValue**](ComparisonFilterValue.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | eq, ne, gt, gte, lt, lte |
+
+
+
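
`ComparisonFilter` tests one metadata key against a value, and `CompoundFilter` combines several of them with `and`/`or` (for example in vector store search requests). A hedged composition sketch; the constructor shapes, enum constant names, and the `ComparisonFilterValue` wrapper construction are all assumptions based only on the property tables above:

```kotlin
//import com.openai.models.*

// Hypothetical: "category == 'blog' AND year >= 2024".
val byCategory = CompoundFilterFiltersInner(
    type = CompoundFilterFiltersInner.Type.eq,   // enum constant name assumed
    key = "category",
    `value` = ComparisonFilterValue("blog"),     // one-of wrapper; construction assumed
)
val byYear = CompoundFilterFiltersInner(
    type = CompoundFilterFiltersInner.Type.gte,
    key = "year",
    `value` = ComparisonFilterValue(2024),
)
val filter = CompoundFilter(
    type = CompoundFilter.Type.and,
    filters = listOf(byCategory, byYear),
)
```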
diff --git a/lib/docs/ComputerAction.md b/lib/docs/ComputerAction.md
new file mode 100644
index 00000000..16f86fe8
--- /dev/null
+++ b/lib/docs/ComputerAction.md
@@ -0,0 +1,32 @@
+
+# ComputerAction
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a click action, this property is always set to `click`. | |
+| **button** | [**inline**](#Button) | Indicates which mouse button was pressed during the click. One of `left`, `right`, `wheel`, `back`, or `forward`. | |
+| **x** | **kotlin.Int** | The x-coordinate where the scroll occurred. | |
+| **y** | **kotlin.Int** | The y-coordinate where the scroll occurred. | |
+| **path** | [**kotlin.collections.List<Coordinate>**](Coordinate.md) | An array of coordinates representing the path of the drag action. Coordinates will appear as an array of objects, eg ``` [ { x: 100, y: 200 }, { x: 200, y: 300 } ] ``` | |
+| **propertyKeys** | **kotlin.collections.List<kotlin.String>** | The combination of keys the model is requesting to be pressed. This is an array of strings, each representing a key. | |
+| **scrollX** | **kotlin.Int** | The horizontal scroll distance. | |
+| **scrollY** | **kotlin.Int** | The vertical scroll distance. | |
+| **text** | **kotlin.String** | The text to type. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | click, double_click, drag, keypress, move, screenshot, scroll, type, wait |
+
+
+
+## Enum: button
+| Name | Value |
+| ---- | ----- |
+| button | left, right, wheel, back, forward |
+
+
+
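
`ComputerAction` flattens all of the computer-use actions (click, drag, keypress, scroll, type, wait, and so on) into one model, so the discriminating `type` field decides which of the other properties are meaningful. A hedged dispatch sketch; the enum constant names are assumptions, while the property names come from the table above:

```kotlin
//import com.openai.models.*

// Hypothetical dispatcher over the flattened action union (constant names assumed).
fun describe(action: ComputerAction): String =
    when (action.type) {
        ComputerAction.Type.click -> "click ${action.button} at (${action.x}, ${action.y})"
        ComputerAction.Type.scroll -> "scroll by (${action.scrollX}, ${action.scrollY})"
        ComputerAction.Type.type -> "type \"${action.text}\""
        ComputerAction.Type.drag -> "drag along ${action.path}"
        else -> "action: ${action.type}"
    }
```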
diff --git a/lib/docs/ComputerScreenshotImage.md b/lib/docs/ComputerScreenshotImage.md
new file mode 100644
index 00000000..b194c338
--- /dev/null
+++ b/lib/docs/ComputerScreenshotImage.md
@@ -0,0 +1,19 @@
+
+# ComputerScreenshotImage
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a computer screenshot, this property is always set to `computer_screenshot`. | |
+| **imageUrl** | **kotlin.String** | The URL of the screenshot image. | [optional] |
+| **fileId** | **kotlin.String** | The identifier of an uploaded file that contains the screenshot. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | computer_screenshot |
+
+
+
diff --git a/lib/docs/ComputerTool.md b/lib/docs/ComputerTool.md
new file mode 100644
index 00000000..2dad8cf5
--- /dev/null
+++ b/lib/docs/ComputerTool.md
@@ -0,0 +1,27 @@
+
+# ComputerTool
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the computer use tool. Always `computer_use_preview`. | |
+| **displayWidth** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The width of the computer display. | |
+| **displayHeight** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The height of the computer display. | |
+| **environment** | [**inline**](#Environment) | The type of computer environment to control. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | computer_use_preview |
+
+
+
+## Enum: environment
+| Name | Value |
+| ---- | ----- |
+| environment | mac, windows, ubuntu, browser |
+
+
+
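
`ComputerTool` is what a request passes to advertise the computer-use surface: display size plus environment. A hedged construction sketch (constructor and enum constant names assumed):

```kotlin
//import com.openai.models.*
import java.math.BigDecimal

// Hypothetical: declare a 1280x800 browser environment for the computer_use_preview tool.
val computerTool = ComputerTool(
    type = ComputerTool.Type.computer_use_preview,   // enum constant name assumed
    displayWidth = BigDecimal(1280),
    displayHeight = BigDecimal(800),
    environment = ComputerTool.Environment.browser,  // enum constant name assumed
)
```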
diff --git a/lib/docs/ComputerToolCall.md b/lib/docs/ComputerToolCall.md
new file mode 100644
index 00000000..52a3ec27
--- /dev/null
+++ b/lib/docs/ComputerToolCall.md
@@ -0,0 +1,29 @@
+
+# ComputerToolCall
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the computer call. Always `computer_call`. | |
+| **id** | **kotlin.String** | The unique ID of the computer call. | |
+| **callId** | **kotlin.String** | An identifier used when responding to the tool call with output. | |
+| **action** | [**ComputerAction**](ComputerAction.md) | | |
+| **pendingSafetyChecks** | [**kotlin.collections.List<ComputerToolCallSafetyCheck>**](ComputerToolCallSafetyCheck.md) | The pending safety checks for the computer call. | |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | computer_call |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/ComputerToolCallOutput.md b/lib/docs/ComputerToolCallOutput.md
new file mode 100644
index 00000000..0d909196
--- /dev/null
+++ b/lib/docs/ComputerToolCallOutput.md
@@ -0,0 +1,29 @@
+
+# ComputerToolCallOutput
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the computer tool call output. Always `computer_call_output`. | |
+| **callId** | **kotlin.String** | The ID of the computer tool call that produced the output. | |
+| **output** | [**ComputerScreenshotImage**](ComputerScreenshotImage.md) | | |
+| **id** | **kotlin.String** | The ID of the computer tool call output. | [optional] |
+| **acknowledgedSafetyChecks** | [**kotlin.collections.List<ComputerToolCallSafetyCheck>**](ComputerToolCallSafetyCheck.md) | The safety checks reported by the API that have been acknowledged by the developer. | [optional] |
+| **status** | [**inline**](#Status) | The status of the message input. One of `in_progress`, `completed`, or `incomplete`. Populated when input items are returned via API. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | computer_call_output |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/ComputerToolCallOutputResource.md b/lib/docs/ComputerToolCallOutputResource.md
new file mode 100644
index 00000000..22dd640a
--- /dev/null
+++ b/lib/docs/ComputerToolCallOutputResource.md
@@ -0,0 +1,29 @@
+
+# ComputerToolCallOutputResource
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the computer tool call output. Always `computer_call_output`. | |
+| **id** | **kotlin.String** | The unique ID of the computer call tool output. | |
+| **callId** | **kotlin.String** | The ID of the computer tool call that produced the output. | |
+| **output** | [**ComputerScreenshotImage**](ComputerScreenshotImage.md) | | |
+| **acknowledgedSafetyChecks** | [**kotlin.collections.List<ComputerToolCallSafetyCheck>**](ComputerToolCallSafetyCheck.md) | The safety checks reported by the API that have been acknowledged by the developer. | [optional] |
+| **status** | [**inline**](#Status) | The status of the message input. One of `in_progress`, `completed`, or `incomplete`. Populated when input items are returned via API. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | computer_call_output |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/ComputerToolCallSafetyCheck.md b/lib/docs/ComputerToolCallSafetyCheck.md
new file mode 100644
index 00000000..e537830d
--- /dev/null
+++ b/lib/docs/ComputerToolCallSafetyCheck.md
@@ -0,0 +1,12 @@
+
+# ComputerToolCallSafetyCheck
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The ID of the pending safety check. | |
+| **code** | **kotlin.String** | The type of the pending safety check. | |
+| **message** | **kotlin.String** | Details about the pending safety check. | |
+
+
+
diff --git a/lib/docs/Content.md b/lib/docs/Content.md
new file mode 100644
index 00000000..40277f5a
--- /dev/null
+++ b/lib/docs/Content.md
@@ -0,0 +1,32 @@
+
+# Content
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the input item. Always `input_file`. | |
+| **text** | **kotlin.String** | The text output from the model. | |
+| **detail** | [**inline**](#Detail) | The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`. | |
+| **annotations** | [**kotlin.collections.List<Annotation>**](Annotation.md) | The annotations of the text output. | |
+| **refusal** | **kotlin.String** | The refusal explanation from the model. | |
+| **imageUrl** | **kotlin.String** | The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL. | [optional] |
+| **fileId** | **kotlin.String** | The ID of the file to be sent to the model. | [optional] |
+| **filename** | **kotlin.String** | The name of the file to be sent to the model. | [optional] |
+| **fileData** | **kotlin.String** | The content of the file to be sent to the model. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | input_file, refusal |
+
+
+
+## Enum: detail
+| Name | Value |
+| ---- | ----- |
+| detail | high, low, auto |
+
+
+
diff --git a/lib/docs/Coordinate.md b/lib/docs/Coordinate.md
new file mode 100644
index 00000000..0b396cce
--- /dev/null
+++ b/lib/docs/Coordinate.md
@@ -0,0 +1,11 @@
+
+# Coordinate
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **x** | **kotlin.Int** | The x-coordinate. | |
+| **y** | **kotlin.Int** | The y-coordinate. | |
+
+
+
diff --git a/lib/docs/CreateAssistantRequest.md b/lib/docs/CreateAssistantRequest.md
index f13c88df..392c4adb 100644
--- a/lib/docs/CreateAssistantRequest.md
+++ b/lib/docs/CreateAssistantRequest.md
@@ -14,7 +14,7 @@
| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. | [optional] |
| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. | [optional] |
-| **responseFormat** | [**AssistantObjectResponseFormat**](AssistantObjectResponseFormat.md) | | [optional] |
+| **responseFormat** | [**AssistantsApiResponseFormatOption**](AssistantsApiResponseFormatOption.md) | | [optional] |
diff --git a/lib/docs/CreateBatchRequest.md b/lib/docs/CreateBatchRequest.md
index 5eac7178..7808c3f1 100644
--- a/lib/docs/CreateBatchRequest.md
+++ b/lib/docs/CreateBatchRequest.md
@@ -5,7 +5,7 @@
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
| **inputFileId** | **kotlin.String** | The ID of an uploaded file that contains requests for the new batch. See [upload file](/docs/api-reference/files/create) for how to upload a file. Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. | |
-| **endpoint** | [**inline**](#Endpoint) | The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. | |
+| **endpoint** | [**inline**](#Endpoint) | The endpoint to be used for all requests in the batch. Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. | |
| **completionWindow** | [**inline**](#CompletionWindow) | The time frame within which the batch should be processed. Currently only `24h` is supported. | |
| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
@@ -14,7 +14,7 @@
## Enum: endpoint
| Name | Value |
| ---- | ----- |
-| endpoint | /v1/chat/completions, /v1/embeddings, /v1/completions |
+| endpoint | /v1/responses, /v1/chat/completions, /v1/embeddings, /v1/completions |
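
Note the new `/v1/responses` value in the endpoint enum. A minimal sketch of building a batch request that targets it, assuming the generated `CreateBatchRequest` data class takes its documented fields as constructor parameters and that the inline enum constants are named roughly as shown (the exact generated names may differ):

```kotlin
import com.openai.models.CreateBatchRequest

// Sketch only: the enum constant names are assumptions about the generator's output.
val batchRequest = CreateBatchRequest(
    inputFileId = "file-abc123",                                 // JSONL file uploaded with purpose `batch`
    endpoint = CreateBatchRequest.Endpoint.v1_responses,         // newly supported alongside /v1/chat/completions etc.
    completionWindow = CreateBatchRequest.CompletionWindow._24h, // only `24h` is currently supported
    metadata = mapOf("project" to "nightly-eval")
)
```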
diff --git a/lib/docs/CreateChatCompletionRequest.md b/lib/docs/CreateChatCompletionRequest.md
index aea96e50..5710ac4e 100644
--- a/lib/docs/CreateChatCompletionRequest.md
+++ b/lib/docs/CreateChatCompletionRequest.md
@@ -5,34 +5,35 @@
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
| **messages** | [**kotlin.collections.List<ChatCompletionRequestMessage>**](ChatCompletionRequestMessage.md) | A list of messages comprising the conversation so far. Depending on the [model](/docs/models) you use, different message types (modalities) are supported, like [text](/docs/guides/text-generation), [images](/docs/guides/vision), and [audio](/docs/guides/audio). | |
-| **model** | [**CreateChatCompletionRequestModel**](CreateChatCompletionRequestModel.md) | | |
-| **store** | **kotlin.Boolean** | Whether or not to store the output of this chat completion request for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products. | [optional] |
-| **reasoningEffort** | [**ReasoningEffort**](ReasoningEffort.md) | | [optional] |
+| **model** | [**ModelIdsShared**](ModelIdsShared.md) | | |
| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
+| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] |
+| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] |
+| **user** | **kotlin.String** | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] |
+| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` | [optional] |
+| **reasoningEffort** | [**ReasoningEffort**](ReasoningEffort.md) | | [optional] |
+| **maxCompletionTokens** | **kotlin.Int** | An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). | [optional] |
| **frequencyPenalty** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. | [optional] |
+| **presencePenalty** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. | [optional] |
+| **webSearchOptions** | [**WebSearch**](WebSearch.md) | | [optional] |
+| **topLogprobs** | **kotlin.Int** | An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. | [optional] |
+| **responseFormat** | [**CreateChatCompletionRequestAllOfResponseFormat**](CreateChatCompletionRequestAllOfResponseFormat.md) | | [optional] |
+| **serviceTier** | [**inline**](#ServiceTier) | Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. | [optional] |
+| **audio** | [**CreateChatCompletionRequestAllOfAudio**](CreateChatCompletionRequestAllOfAudio.md) | | [optional] |
+| **store** | **kotlin.Boolean** | Whether or not to store the output of this chat completion request for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products. | [optional] |
+| **stream** | **kotlin.Boolean** | If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). See the [Streaming section below](/docs/api-reference/chat/streaming) for more information, along with the [streaming responses](/docs/guides/streaming-responses) guide for more information on how to handle the streaming events. | [optional] |
+| **stop** | [**StopConfiguration**](StopConfiguration.md) | | [optional] |
| **logitBias** | **kotlin.collections.Map<kotlin.String, kotlin.Int>** | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. | [optional] |
| **logprobs** | **kotlin.Boolean** | Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. | [optional] |
-| **topLogprobs** | **kotlin.Int** | An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. | [optional] |
| **maxTokens** | **kotlin.Int** | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o1 series models](/docs/guides/reasoning). | [optional] |
-| **maxCompletionTokens** | **kotlin.Int** | An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). | [optional] |
| **n** | **kotlin.Int** | How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. | [optional] |
-| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` | [optional] |
-| **prediction** | [**CreateChatCompletionRequestPrediction**](CreateChatCompletionRequestPrediction.md) | | [optional] |
-| **audio** | [**CreateChatCompletionRequestAudio**](CreateChatCompletionRequestAudio.md) | | [optional] |
-| **presencePenalty** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. | [optional] |
-| **responseFormat** | [**CreateChatCompletionRequestResponseFormat**](CreateChatCompletionRequestResponseFormat.md) | | [optional] |
+| **prediction** | [**CreateChatCompletionRequestAllOfPrediction**](CreateChatCompletionRequestAllOfPrediction.md) | | [optional] |
| **seed** | **kotlin.Long** | This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. | [optional] |
-| **serviceTier** | [**inline**](#ServiceTier) | Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. | [optional] |
-| **stop** | [**CreateChatCompletionRequestStop**](CreateChatCompletionRequestStop.md) | | [optional] |
-| **stream** | **kotlin.Boolean** | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). | [optional] |
| **streamOptions** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] |
-| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] |
-| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] |
| **tools** | [**kotlin.collections.List<ChatCompletionTool>**](ChatCompletionTool.md) | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. | [optional] |
| **toolChoice** | [**ChatCompletionToolChoiceOption**](ChatCompletionToolChoiceOption.md) | | [optional] |
| **parallelToolCalls** | **kotlin.Boolean** | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. | [optional] |
-| **user** | **kotlin.String** | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] |
-| **functionCall** | [**CreateChatCompletionRequestFunctionCall**](CreateChatCompletionRequestFunctionCall.md) | | [optional] |
+| **functionCall** | [**CreateChatCompletionRequestAllOfFunctionCall**](CreateChatCompletionRequestAllOfFunctionCall.md) | | [optional] |
| **functions** | [**kotlin.collections.List<ChatCompletionFunctions>**](ChatCompletionFunctions.md) | Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. | [optional] |
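
The property order and several type names changed here (`ModelIdsShared`, the `CreateChatCompletionRequestAllOf*` models, `StopConfiguration`), but request construction keeps the same shape. A minimal sketch, assuming the generated data class exposes these fields as constructor parameters with nullable defaults; building the `ChatCompletionRequestMessage` values and the `ModelIdsShared` wrapper is left to the caller because their construction depends on the generator's anyOf handling:

```kotlin
import com.openai.models.ChatCompletionRequestMessage
import com.openai.models.CreateChatCompletionRequest
import com.openai.models.ModelIdsShared

// Sketch only: constructor parameter names mirror the documented properties above.
fun buildChatRequest(
    model: ModelIdsShared,
    messages: List<ChatCompletionRequestMessage>
): CreateChatCompletionRequest =
    CreateChatCompletionRequest(
        messages = messages,
        model = model,
        temperature = java.math.BigDecimal("0.2"), // deterministic-leaning sampling
        maxCompletionTokens = 512,                 // replaces the deprecated maxTokens
        store = true                               // keep the completion for distillation/evals
    )
```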
diff --git a/lib/docs/CreateChatCompletionRequestAllOfAudio.md b/lib/docs/CreateChatCompletionRequestAllOfAudio.md
new file mode 100644
index 00000000..9e4baeaf
--- /dev/null
+++ b/lib/docs/CreateChatCompletionRequestAllOfAudio.md
@@ -0,0 +1,18 @@
+
+# CreateChatCompletionRequestAllOfAudio
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **voice** | [**VoiceIdsShared**](VoiceIdsShared.md) | | |
+| **format** | [**inline**](#Format) | Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. | |
+
+
+
+## Enum: format
+| Name | Value |
+| ---- | ----- |
+| format | wav, mp3, flac, opus, pcm16 |
+
+
+
diff --git a/lib/docs/CreateChatCompletionRequestFunctionCall.md b/lib/docs/CreateChatCompletionRequestAllOfFunctionCall.md
similarity index 80%
rename from lib/docs/CreateChatCompletionRequestFunctionCall.md
rename to lib/docs/CreateChatCompletionRequestAllOfFunctionCall.md
index 485799b5..56231dc0 100644
--- a/lib/docs/CreateChatCompletionRequestFunctionCall.md
+++ b/lib/docs/CreateChatCompletionRequestAllOfFunctionCall.md
@@ -1,5 +1,5 @@
-# CreateChatCompletionRequestFunctionCall
+# CreateChatCompletionRequestAllOfFunctionCall
## Properties
| Name | Type | Description | Notes |
diff --git a/lib/docs/CreateChatCompletionRequestPrediction.md b/lib/docs/CreateChatCompletionRequestAllOfPrediction.md
similarity index 90%
rename from lib/docs/CreateChatCompletionRequestPrediction.md
rename to lib/docs/CreateChatCompletionRequestAllOfPrediction.md
index 9d776145..09e6c4e9 100644
--- a/lib/docs/CreateChatCompletionRequestPrediction.md
+++ b/lib/docs/CreateChatCompletionRequestAllOfPrediction.md
@@ -1,5 +1,5 @@
-# CreateChatCompletionRequestPrediction
+# CreateChatCompletionRequestAllOfPrediction
## Properties
| Name | Type | Description | Notes |
diff --git a/lib/docs/AssistantObjectResponseFormat.md b/lib/docs/CreateChatCompletionRequestAllOfResponseFormat.md
similarity index 55%
rename from lib/docs/AssistantObjectResponseFormat.md
rename to lib/docs/CreateChatCompletionRequestAllOfResponseFormat.md
index 9f3744e9..bc51fad9 100644
--- a/lib/docs/AssistantObjectResponseFormat.md
+++ b/lib/docs/CreateChatCompletionRequestAllOfResponseFormat.md
@@ -1,18 +1,18 @@
-# AssistantObjectResponseFormat
+# CreateChatCompletionRequestAllOfResponseFormat
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **type** | [**inline**](#Type) | The type of response format being defined: `json_schema` | |
-| **jsonSchema** | [**ResponseFormatJsonSchemaJsonSchema**](ResponseFormatJsonSchemaJsonSchema.md) | | |
+| **type** | [**inline**](#Type) | The type of response format being defined. Always `text`. | |
+| **jsonSchema** | [**JSONSchema**](JSONSchema.md) | | |
## Enum: type
| Name | Value |
| ---- | ----- |
-| type | json_schema |
+| type | text, json_schema, json_object |
diff --git a/lib/docs/CreateChatCompletionRequestAudio.md b/lib/docs/CreateChatCompletionRequestAudio.md
deleted file mode 100644
index 23385b61..00000000
--- a/lib/docs/CreateChatCompletionRequestAudio.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# CreateChatCompletionRequestAudio
-
-## Properties
-| Name | Type | Description | Notes |
-| ------------ | ------------- | ------------- | ------------- |
-| **voice** | [**inline**](#Voice) | The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive). | |
-| **format** | [**inline**](#Format) | Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. | |
-
-
-
-## Enum: voice
-| Name | Value |
-| ---- | ----- |
-| voice | alloy, ash, ballad, coral, echo, sage, shimmer, verse |
-
-
-
-## Enum: format
-| Name | Value |
-| ---- | ----- |
-| format | wav, mp3, flac, opus, pcm16 |
-
-
-
diff --git a/lib/docs/CreateChatCompletionRequestResponseFormat.md b/lib/docs/CreateChatCompletionRequestResponseFormat.md
deleted file mode 100644
index 6e81ed00..00000000
--- a/lib/docs/CreateChatCompletionRequestResponseFormat.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-# CreateChatCompletionRequestResponseFormat
-
-## Properties
-| Name | Type | Description | Notes |
-| ------------ | ------------- | ------------- | ------------- |
-| **type** | [**inline**](#Type) | The type of response format being defined: `text` | |
-| **jsonSchema** | [**ResponseFormatJsonSchemaJsonSchema**](ResponseFormatJsonSchemaJsonSchema.md) | | |
-
-
-
-## Enum: type
-| Name | Value |
-| ---- | ----- |
-| type | text, json_object, json_schema |
-
-
-
diff --git a/lib/docs/CreateChatCompletionStreamResponse.md b/lib/docs/CreateChatCompletionStreamResponse.md
index eccf2788..b0bd4dc8 100644
--- a/lib/docs/CreateChatCompletionStreamResponse.md
+++ b/lib/docs/CreateChatCompletionStreamResponse.md
@@ -11,7 +11,7 @@
| **`object`** | [**inline**](#`Object`) | The object type, which is always `chat.completion.chunk`. | |
| **serviceTier** | [**inline**](#ServiceTier) | The service tier used for processing the request. | [optional] |
| **systemFingerprint** | **kotlin.String** | This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. | [optional] |
-| **usage** | [**CreateChatCompletionStreamResponseUsage**](CreateChatCompletionStreamResponseUsage.md) | | [optional] |
+| **usage** | [**CompletionUsage**](CompletionUsage.md) | | [optional] |
diff --git a/lib/docs/CreateChatCompletionStreamResponseUsage.md b/lib/docs/CreateChatCompletionStreamResponseUsage.md
deleted file mode 100644
index 8fed09c0..00000000
--- a/lib/docs/CreateChatCompletionStreamResponseUsage.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-# CreateChatCompletionStreamResponseUsage
-
-## Properties
-| Name | Type | Description | Notes |
-| ------------ | ------------- | ------------- | ------------- |
-| **completionTokens** | **kotlin.Int** | Number of tokens in the generated completion. | |
-| **promptTokens** | **kotlin.Int** | Number of tokens in the prompt. | |
-| **totalTokens** | **kotlin.Int** | Total number of tokens used in the request (prompt + completion). | |
-
-
-
diff --git a/lib/docs/CreateCompletionRequest.md b/lib/docs/CreateCompletionRequest.md
index 593d078a..0a6f8296 100644
--- a/lib/docs/CreateCompletionRequest.md
+++ b/lib/docs/CreateCompletionRequest.md
@@ -15,7 +15,7 @@
| **n** | **kotlin.Int** | How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | [optional] |
| **presencePenalty** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] |
| **seed** | **kotlin.Long** | If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. | [optional] |
-| **stop** | [**CreateCompletionRequestStop**](CreateCompletionRequestStop.md) | | [optional] |
+| **stop** | [**StopConfiguration**](StopConfiguration.md) | | [optional] |
| **stream** | **kotlin.Boolean** | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). | [optional] |
| **streamOptions** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] |
| **suffix** | **kotlin.String** | The suffix that comes after a completion of inserted text. This parameter is only supported for `gpt-3.5-turbo-instruct`. | [optional] |
diff --git a/lib/docs/CreateFineTuningCheckpointPermissionRequest.md b/lib/docs/CreateFineTuningCheckpointPermissionRequest.md
new file mode 100644
index 00000000..086586f9
--- /dev/null
+++ b/lib/docs/CreateFineTuningCheckpointPermissionRequest.md
@@ -0,0 +1,10 @@
+
+# CreateFineTuningCheckpointPermissionRequest
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **projectIds** | **kotlin.collections.List<kotlin.String>** | The project identifiers to grant access to. | |
+
+
+
diff --git a/lib/docs/CreateFineTuningJobRequest.md b/lib/docs/CreateFineTuningJobRequest.md
index 6af07a98..3cada5b1 100644
--- a/lib/docs/CreateFineTuningJobRequest.md
+++ b/lib/docs/CreateFineTuningJobRequest.md
@@ -12,6 +12,7 @@
| **integrations** | [**kotlin.collections.List<CreateFineTuningJobRequestIntegrationsInner>**](CreateFineTuningJobRequestIntegrationsInner.md) | A list of integrations to enable for your fine-tuning job. | [optional] |
| **seed** | **kotlin.Int** | The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you. | [optional] |
| **method** | [**FineTuneMethod**](FineTuneMethod.md) | | [optional] |
+| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
diff --git a/lib/docs/CreateModelResponseProperties.md b/lib/docs/CreateModelResponseProperties.md
new file mode 100644
index 00000000..84983f27
--- /dev/null
+++ b/lib/docs/CreateModelResponseProperties.md
@@ -0,0 +1,13 @@
+
+# CreateModelResponseProperties
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
+| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] |
+| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] |
+| **user** | **kotlin.String** | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] |
+
+
+
diff --git a/lib/docs/CreateResponse.md b/lib/docs/CreateResponse.md
new file mode 100644
index 00000000..8c68b4d8
--- /dev/null
+++ b/lib/docs/CreateResponse.md
@@ -0,0 +1,34 @@
+
+# CreateResponse
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **model** | [**ModelIdsResponses**](ModelIdsResponses.md) | | |
+| **input** | [**CreateResponseAllOfInput**](CreateResponseAllOfInput.md) | | |
+| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
+| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] |
+| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] |
+| **user** | **kotlin.String** | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] |
+| **previousResponseId** | **kotlin.String** | The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](/docs/guides/conversation-state). | [optional] |
+| **reasoning** | [**Reasoning**](Reasoning.md) | | [optional] |
+| **maxOutputTokens** | **kotlin.Int** | An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). | [optional] |
+| **instructions** | **kotlin.String** | Inserts a system (or developer) message as the first item in the model's context. When used along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to swap out system (or developer) messages in new responses. | [optional] |
+| **text** | [**ResponsePropertiesText**](ResponsePropertiesText.md) | | [optional] |
+| **tools** | [**kotlin.collections.List<Tool>**](Tool.md) | An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. The two categories of tools you can provide the model are: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like [web search](/docs/guides/tools-web-search) or [file search](/docs/guides/tools-file-search). Learn more about [built-in tools](/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling the model to call your own code. Learn more about [function calling](/docs/guides/function-calling). | [optional] |
+| **toolChoice** | [**ResponsePropertiesToolChoice**](ResponsePropertiesToolChoice.md) | | [optional] |
+| **truncation** | [**inline**](#Truncation) | The truncation strategy to use for the model response. - `auto`: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation. - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. | [optional] |
+| **include** | [**kotlin.collections.List<Includable>**](Includable.md) | Specify additional output data to include in the model response. Currently supported values are: - `file_search_call.results`: Include the search results of the file search tool call. - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. | [optional] |
+| **parallelToolCalls** | **kotlin.Boolean** | Whether to allow the model to run tool calls in parallel. | [optional] |
+| **store** | **kotlin.Boolean** | Whether to store the generated model response for later retrieval via API. | [optional] |
+| **stream** | **kotlin.Boolean** | If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). See the [Streaming section below](/docs/api-reference/responses-streaming) for more information. | [optional] |
+
+
+
+## Enum: truncation
+| Name | Value |
+| ---- | ----- |
+| truncation | auto, disabled |
+
+
+
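
`CreateResponse` is the request body for the new Responses API (`/v1/responses`). A minimal sketch of assembling one, assuming the generated class exposes the documented fields as constructor parameters; `ModelIdsResponses` and `CreateResponseAllOfInput` are generated wrapper types whose construction depends on the generator's anyOf handling, so they are passed in prebuilt here, and the inline `Truncation` enum constant name is an assumption:

```kotlin
import com.openai.models.CreateResponse
import com.openai.models.CreateResponseAllOfInput
import com.openai.models.ModelIdsResponses

// Sketch only: wrapper construction and enum constant names are assumptions.
fun buildResponseRequest(
    model: ModelIdsResponses,
    input: CreateResponseAllOfInput,
    previousResponseId: String? = null
): CreateResponse =
    CreateResponse(
        model = model,
        input = input,
        previousResponseId = previousResponseId,     // chains multi-turn conversation state
        truncation = CreateResponse.Truncation.auto, // drop middle items instead of failing with a 400
        store = true,
        stream = false
    )
```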
diff --git a/lib/docs/CreateChatCompletionRequestStop.md b/lib/docs/CreateResponseAllOfInput.md
similarity index 78%
rename from lib/docs/CreateChatCompletionRequestStop.md
rename to lib/docs/CreateResponseAllOfInput.md
index ba6f3832..213147ec 100644
--- a/lib/docs/CreateChatCompletionRequestStop.md
+++ b/lib/docs/CreateResponseAllOfInput.md
@@ -1,5 +1,5 @@
-# CreateChatCompletionRequestStop
+# CreateResponseAllOfInput
## Properties
| Name | Type | Description | Notes |
diff --git a/lib/docs/CreateRunRequest.md b/lib/docs/CreateRunRequest.md
index 60dac20a..e225d187 100644
--- a/lib/docs/CreateRunRequest.md
+++ b/lib/docs/CreateRunRequest.md
@@ -20,7 +20,7 @@
| **truncationStrategy** | [**CreateRunRequestTruncationStrategy**](CreateRunRequestTruncationStrategy.md) | | [optional] |
| **toolChoice** | [**CreateRunRequestToolChoice**](CreateRunRequestToolChoice.md) | | [optional] |
| **parallelToolCalls** | **kotlin.Boolean** | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. | [optional] |
-| **responseFormat** | [**AssistantObjectResponseFormat**](AssistantObjectResponseFormat.md) | | [optional] |
+| **responseFormat** | [**AssistantsApiResponseFormatOption**](AssistantsApiResponseFormatOption.md) | | [optional] |
diff --git a/lib/docs/CreateSpeechRequest.md b/lib/docs/CreateSpeechRequest.md
index 6240facf..f41f6bea 100644
--- a/lib/docs/CreateSpeechRequest.md
+++ b/lib/docs/CreateSpeechRequest.md
@@ -6,18 +6,12 @@
| ------------ | ------------- | ------------- | ------------- |
| **model** | [**CreateSpeechRequestModel**](CreateSpeechRequestModel.md) | | |
| **input** | **kotlin.String** | The text to generate audio for. The maximum length is 4096 characters. | |
-| **voice** | [**inline**](#Voice) | The voice to use when generating the audio. Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options). | |
+| **voice** | [**VoiceIdsShared**](VoiceIdsShared.md) | | |
+| **instructions** | **kotlin.String** | Control the voice of your generated audio with additional instructions. Does not work with `tts-1` or `tts-1-hd`. | [optional] |
| **responseFormat** | [**inline**](#ResponseFormat) | The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. | [optional] |
| **speed** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. | [optional] |
-
-## Enum: voice
-| Name | Value |
-| ---- | ----- |
-| voice | alloy, ash, coral, echo, fable, onyx, nova, sage, shimmer |
-
-
## Enum: response_format
| Name | Value |
diff --git a/lib/docs/CreateThreadAndRunRequest.md b/lib/docs/CreateThreadAndRunRequest.md
index ff5c3f2d..d1de9d1a 100644
--- a/lib/docs/CreateThreadAndRunRequest.md
+++ b/lib/docs/CreateThreadAndRunRequest.md
@@ -19,7 +19,7 @@
| **truncationStrategy** | [**CreateRunRequestTruncationStrategy**](CreateRunRequestTruncationStrategy.md) | | [optional] |
| **toolChoice** | [**CreateRunRequestToolChoice**](CreateRunRequestToolChoice.md) | | [optional] |
| **parallelToolCalls** | **kotlin.Boolean** | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. | [optional] |
-| **responseFormat** | [**AssistantObjectResponseFormat**](AssistantObjectResponseFormat.md) | | [optional] |
+| **responseFormat** | [**AssistantsApiResponseFormatOption**](AssistantsApiResponseFormatOption.md) | | [optional] |
diff --git a/lib/docs/CreateTranscription200Response.md b/lib/docs/CreateTranscription200Response.md
index e41839aa..362fae94 100644
--- a/lib/docs/CreateTranscription200Response.md
+++ b/lib/docs/CreateTranscription200Response.md
@@ -7,6 +7,7 @@
| **text** | **kotlin.String** | The transcribed text. | |
| **language** | **kotlin.String** | The language of the input audio. | |
| **duration** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The duration of the input audio. | |
+| **logprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | The log probabilities of the tokens in the transcription. Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added to the `include` array. | [optional] |
| **words** | [**kotlin.collections.List<TranscriptionWord>**](TranscriptionWord.md) | Extracted words and their corresponding timestamps. | [optional] |
| **segments** | [**kotlin.collections.List<TranscriptionSegment>**](TranscriptionSegment.md) | Segments of the transcribed text and their corresponding details. | [optional] |
diff --git a/lib/docs/CreateTranscriptionResponseJson.md b/lib/docs/CreateTranscriptionResponseJson.md
index c814d8a3..f6758229 100644
--- a/lib/docs/CreateTranscriptionResponseJson.md
+++ b/lib/docs/CreateTranscriptionResponseJson.md
@@ -5,6 +5,7 @@
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
| **text** | **kotlin.String** | The transcribed text. | |
+| **logprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | The log probabilities of the tokens in the transcription. Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added to the `include` array. | [optional] |
diff --git a/lib/docs/CreateTranscriptionResponseStreamEvent.md b/lib/docs/CreateTranscriptionResponseStreamEvent.md
new file mode 100644
index 00000000..828b5d8f
--- /dev/null
+++ b/lib/docs/CreateTranscriptionResponseStreamEvent.md
@@ -0,0 +1,20 @@
+
+# CreateTranscriptionResponseStreamEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `transcript.text.delta`. | |
+| **delta** | **kotlin.String** | The text delta that was additionally transcribed. | |
+| **text** | **kotlin.String** | The text that was transcribed. | |
+| **logprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | The log probabilities of the individual tokens in the transcription. Only included if you [create a transcription](/docs/api-reference/audio/create-transcription) with the `include[]` parameter set to `logprobs`. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | transcript.text.delta, transcript.text.done |
+
+
+
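
The streaming transcription event covers both `transcript.text.delta` and `transcript.text.done` in a single type. A minimal sketch of dispatching on it, assuming the generated class exposes `type`, `delta`, and `text` as properties and that the inline `Type` enum constants are named roughly as shown:

```kotlin
import com.openai.models.CreateTranscriptionResponseStreamEvent

// Sketch only: enum constant names are assumptions about the generator's output.
fun handleTranscriptionEvent(event: CreateTranscriptionResponseStreamEvent) {
    when (event.type) {
        CreateTranscriptionResponseStreamEvent.Type.transcript_text_delta ->
            print(event.delta)                  // append incremental text as it arrives
        CreateTranscriptionResponseStreamEvent.Type.transcript_text_done ->
            println("\nFinal: ${event.text}")   // full transcript on completion
        else -> Unit                            // ignore anything unexpected
    }
}
```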
diff --git a/lib/docs/CreateChatCompletionRequestModel.md b/lib/docs/CreateTranslationRequestModel.md
similarity index 77%
rename from lib/docs/CreateChatCompletionRequestModel.md
rename to lib/docs/CreateTranslationRequestModel.md
index 79e183b4..bb1762b9 100644
--- a/lib/docs/CreateChatCompletionRequestModel.md
+++ b/lib/docs/CreateTranslationRequestModel.md
@@ -1,5 +1,5 @@
-# CreateChatCompletionRequestModel
+# CreateTranslationRequestModel
## Properties
| Name | Type | Description | Notes |
diff --git a/lib/docs/CreateVectorStoreFileBatchRequest.md b/lib/docs/CreateVectorStoreFileBatchRequest.md
index af996d24..f4f4317d 100644
--- a/lib/docs/CreateVectorStoreFileBatchRequest.md
+++ b/lib/docs/CreateVectorStoreFileBatchRequest.md
@@ -6,6 +6,7 @@
| ------------ | ------------- | ------------- | ------------- |
| **fileIds** | **kotlin.collections.List<kotlin.String>** | A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. | |
| **chunkingStrategy** | [**ChunkingStrategyRequestParam**](ChunkingStrategyRequestParam.md) | | [optional] |
+| **attributes** | [**kotlin.collections.Map<kotlin.String, VectorStoreFileAttributesValue>**](VectorStoreFileAttributesValue.md) | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. | [optional] |
diff --git a/lib/docs/CreateVectorStoreFileRequest.md b/lib/docs/CreateVectorStoreFileRequest.md
index 02a4c6a9..02320838 100644
--- a/lib/docs/CreateVectorStoreFileRequest.md
+++ b/lib/docs/CreateVectorStoreFileRequest.md
@@ -6,6 +6,7 @@
| ------------ | ------------- | ------------- | ------------- |
| **fileId** | **kotlin.String** | A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. | |
| **chunkingStrategy** | [**ChunkingStrategyRequestParam**](ChunkingStrategyRequestParam.md) | | [optional] |
+| **attributes** | [**kotlin.collections.Map<kotlin.String, VectorStoreFileAttributesValue>**](VectorStoreFileAttributesValue.md) | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. | [optional] |
diff --git a/lib/docs/DeleteFineTuningCheckpointPermissionResponse.md b/lib/docs/DeleteFineTuningCheckpointPermissionResponse.md
new file mode 100644
index 00000000..e6e2cf41
--- /dev/null
+++ b/lib/docs/DeleteFineTuningCheckpointPermissionResponse.md
@@ -0,0 +1,19 @@
+
+# DeleteFineTuningCheckpointPermissionResponse
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The ID of the fine-tuned model checkpoint permission that was deleted. | |
+| **`object`** | [**inline**](#`Object`) | The object type, which is always \"checkpoint.permission\". | |
+| **deleted** | **kotlin.Boolean** | Whether the fine-tuned model checkpoint permission was successfully deleted. | |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | checkpoint.permission |
+
+
+
diff --git a/lib/docs/DoubleClick.md b/lib/docs/DoubleClick.md
new file mode 100644
index 00000000..8d42b2cb
--- /dev/null
+++ b/lib/docs/DoubleClick.md
@@ -0,0 +1,19 @@
+
+# DoubleClick
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a double click action, this property is always set to `double_click`. | |
+| **x** | **kotlin.Int** | The x-coordinate where the double click occurred. | |
+| **y** | **kotlin.Int** | The y-coordinate where the double click occurred. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | double_click |
+
+
+
diff --git a/lib/docs/Drag.md b/lib/docs/Drag.md
new file mode 100644
index 00000000..0a4e9ad6
--- /dev/null
+++ b/lib/docs/Drag.md
@@ -0,0 +1,18 @@
+
+# Drag
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a drag action, this property is always set to `drag`. | |
+| **path** | [**kotlin.collections.List<Coordinate>**](Coordinate.md) | An array of coordinates representing the path of the drag action. Coordinates will appear as an array of objects, e.g. ``` [ { x: 100, y: 200 }, { x: 200, y: 300 } ] ``` | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | drag |
+
+
+
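
The `Drag` action's `path` is just a list of the `Coordinate` objects documented earlier in this diff. A minimal sketch mirroring the example path from the description above, assuming both generated data classes take their documented fields as constructor parameters and the inline `Type` enum constant is named roughly as shown:

```kotlin
import com.openai.models.Coordinate
import com.openai.models.Drag

// Sketch only: a two-point drag from (100, 200) to (200, 300).
val drag = Drag(
    type = Drag.Type.drag,
    path = listOf(
        Coordinate(x = 100, y = 200),
        Coordinate(x = 200, y = 300)
    )
)
```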
diff --git a/lib/docs/EasyInputMessage.md b/lib/docs/EasyInputMessage.md
new file mode 100644
index 00000000..8ba92d2e
--- /dev/null
+++ b/lib/docs/EasyInputMessage.md
@@ -0,0 +1,26 @@
+
+# EasyInputMessage
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **role** | [**inline**](#Role) | The role of the message input. One of `user`, `assistant`, `system`, or `developer`. | |
+| **content** | [**EasyInputMessageContent**](EasyInputMessageContent.md) | | |
+| **type** | [**inline**](#Type) | The type of the message input. Always `message`. | [optional] |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | user, assistant, system, developer |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | message |
+
+
+
diff --git a/lib/docs/EasyInputMessageContent.md b/lib/docs/EasyInputMessageContent.md
new file mode 100644
index 00000000..9029a73c
--- /dev/null
+++ b/lib/docs/EasyInputMessageContent.md
@@ -0,0 +1,9 @@
+
+# EasyInputMessageContent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+
+
+
diff --git a/lib/docs/FileCitation.md b/lib/docs/FileCitation.md
new file mode 100644
index 00000000..af0c0f22
--- /dev/null
+++ b/lib/docs/FileCitation.md
@@ -0,0 +1,19 @@
+
+# FileCitation
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the file citation. Always `file_citation`. | |
+| **index** | **kotlin.Int** | The index of the file in the list of files. | |
+| **fileId** | **kotlin.String** | The ID of the file. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file_citation |
+
+
+
diff --git a/lib/docs/FilePath.md b/lib/docs/FilePath.md
new file mode 100644
index 00000000..f9adb2cd
--- /dev/null
+++ b/lib/docs/FilePath.md
@@ -0,0 +1,19 @@
+
+# FilePath
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the file path. Always `file_path`. | |
+| **fileId** | **kotlin.String** | The ID of the file. | |
+| **index** | **kotlin.Int** | The index of the file in the list of files. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file_path |
+
+
+
diff --git a/lib/docs/FileSearchRanker.md b/lib/docs/FileSearchRanker.md
new file mode 100644
index 00000000..06efd00b
--- /dev/null
+++ b/lib/docs/FileSearchRanker.md
@@ -0,0 +1,12 @@
+
+# FileSearchRanker
+
+## Enum
+
+
+ * `auto` (value: `"auto"`)
+
+ * `default_2024_08_21` (value: `"default_2024_08_21"`)
+
+
+
diff --git a/lib/docs/FileSearchRankingOptions.md b/lib/docs/FileSearchRankingOptions.md
index 785d7ecb..39e645ad 100644
--- a/lib/docs/FileSearchRankingOptions.md
+++ b/lib/docs/FileSearchRankingOptions.md
@@ -5,14 +5,7 @@
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
| **scoreThreshold** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The score threshold for the file search. The value must be a floating point number between 0 and 1. | |
-| **ranker** | [**inline**](#Ranker) | The ranker to use for the file search. If not specified will use the `auto` ranker. | [optional] |
-
-
-
-## Enum: ranker
-| Name | Value |
-| ---- | ----- |
-| ranker | auto, default_2024_08_21 |
+| **ranker** | [**FileSearchRanker**](FileSearchRanker.md) | | [optional] |
diff --git a/lib/docs/FileSearchTool.md b/lib/docs/FileSearchTool.md
new file mode 100644
index 00000000..71931c65
--- /dev/null
+++ b/lib/docs/FileSearchTool.md
@@ -0,0 +1,21 @@
+
+# FileSearchTool
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the file search tool. Always `file_search`. | |
+| **vectorStoreIds** | **kotlin.collections.List<kotlin.String>** | The IDs of the vector stores to search. | |
+| **maxNumResults** | **kotlin.Int** | The maximum number of results to return. This number should be between 1 and 50 inclusive. | [optional] |
+| **filters** | [**FileSearchToolFilters**](FileSearchToolFilters.md) | | [optional] |
+| **rankingOptions** | [**FileSearchToolRankingOptions**](FileSearchToolRankingOptions.md) | | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file_search |
+
+
+
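
A minimal sketch of configuring the new `FileSearchTool` for a Responses API request, assuming the generated data class exposes the documented fields as constructor parameters with optional defaults and that the inline `Type` enum constant is named roughly as shown:

```kotlin
import com.openai.models.FileSearchTool

// Sketch only: field names follow the documented properties above.
val fileSearch = FileSearchTool(
    type = FileSearchTool.Type.file_search, // assumed enum constant name
    vectorStoreIds = listOf("vs_abc123"),
    maxNumResults = 20                      // must be between 1 and 50 inclusive
)
```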
diff --git a/lib/docs/FileSearchToolCall.md b/lib/docs/FileSearchToolCall.md
new file mode 100644
index 00000000..afdb9a95
--- /dev/null
+++ b/lib/docs/FileSearchToolCall.md
@@ -0,0 +1,28 @@
+
+# FileSearchToolCall
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The unique ID of the file search tool call. | |
+| **type** | [**inline**](#Type) | The type of the file search tool call. Always `file_search_call`. | |
+| **status** | [**inline**](#Status) | The status of the file search tool call. One of `in_progress`, `searching`, `completed`, `incomplete`, or `failed`. | |
+| **queries** | **kotlin.collections.List<kotlin.String>** | The queries used to search for files. | |
+| **results** | [**kotlin.collections.List<FileSearchToolCallResultsInner>**](FileSearchToolCallResultsInner.md) | The results of the file search tool call. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file_search_call |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, searching, completed, incomplete, failed |
+
+
+
diff --git a/lib/docs/FileSearchToolCallResultsInner.md b/lib/docs/FileSearchToolCallResultsInner.md
new file mode 100644
index 00000000..4efc362e
--- /dev/null
+++ b/lib/docs/FileSearchToolCallResultsInner.md
@@ -0,0 +1,14 @@
+
+# FileSearchToolCallResultsInner
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **fileId** | **kotlin.String** | The unique ID of the file. | [optional] |
+| **text** | **kotlin.String** | The text that was retrieved from the file. | [optional] |
+| **filename** | **kotlin.String** | The name of the file. | [optional] |
+| **attributes** | [**kotlin.collections.Map<kotlin.String, VectorStoreFileAttributesValue>**](VectorStoreFileAttributesValue.md) | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. | [optional] |
+| **score** | **kotlin.Float** | The relevance score of the file - a value between 0 and 1. | [optional] |
+
+
+
diff --git a/lib/docs/FileSearchToolFilters.md b/lib/docs/FileSearchToolFilters.md
new file mode 100644
index 00000000..9388932d
--- /dev/null
+++ b/lib/docs/FileSearchToolFilters.md
@@ -0,0 +1,20 @@
+
+# FileSearchToolFilters
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The comparison operator (`eq`, `ne`, `gt`, `gte`, `lt`, `lte`) for a comparison filter, or the logical operator (`and`, `or`) for a compound filter. - `eq`: equals - `ne`: not equal - `gt`: greater than - `gte`: greater than or equal - `lt`: less than - `lte`: less than or equal | |
+| **key** | **kotlin.String** | The key to compare against the value. | |
+| **`value`** | [**ComparisonFilterValue**](ComparisonFilterValue.md) | | |
+| **filters** | [**kotlin.collections.List<CompoundFilterFiltersInner>**](CompoundFilterFiltersInner.md) | Array of filters to combine. Items can be `ComparisonFilter` or `CompoundFilter`. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | eq, ne, gt, gte, lt, lte, and, or |
+
+
+
diff --git a/lib/docs/FileSearchToolRankingOptions.md b/lib/docs/FileSearchToolRankingOptions.md
new file mode 100644
index 00000000..87b0bda7
--- /dev/null
+++ b/lib/docs/FileSearchToolRankingOptions.md
@@ -0,0 +1,18 @@
+
+# FileSearchToolRankingOptions
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **ranker** | [**inline**](#Ranker) | The ranker to use for the file search. | [optional] |
+| **scoreThreshold** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will attempt to return only the most relevant results, but may return fewer results. | [optional] |
+
+
+
+## Enum: ranker
+| Name | Value |
+| ---- | ----- |
+| ranker | auto, default-2024-11-15 |
+
+
+
diff --git a/lib/docs/FilesApi.md b/lib/docs/FilesApi.md
index fbe7ee4c..2cee7025 100644
--- a/lib/docs/FilesApi.md
+++ b/lib/docs/FilesApi.md
@@ -25,7 +25,7 @@ Upload a file that can be used across various endpoints. Individual files can be
val apiInstance = FilesApi()
val file : java.io.File = BINARY_DATA_HERE // java.io.File | The File object (not file name) to be uploaded.
-val purpose : kotlin.String = purpose_example // kotlin.String | The intended purpose of the uploaded file. Use \\\"assistants\\\" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, \\\"vision\\\" for Assistants image file inputs, \\\"batch\\\" for [Batch API](/docs/guides/batch), and \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tuning).
+val purpose : kotlin.String = purpose_example // kotlin.String | The intended purpose of the uploaded file. One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets
try {
val result : OpenAIFile = apiInstance.createFile(file, purpose)
println(result)
@@ -42,7 +42,7 @@ try {
| Name | Type | Description | Notes |
| ------------- | ------------- | ------------- | ------------- |
| **file** | **java.io.File**| The File object (not file name) to be uploaded. | |
-| **purpose** | **kotlin.String**| The intended purpose of the uploaded file. Use \\\"assistants\\\" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, \\\"vision\\\" for Assistants image file inputs, \\\"batch\\\" for [Batch API](/docs/guides/batch), and \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tuning). | [enum: assistants, batch, fine-tune, vision] |
+| **purpose** | **kotlin.String**| The intended purpose of the uploaded file. One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets | [enum: assistants, batch, fine-tune, vision, user_data, evals] |
### Return type
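
The two new purposes (`user_data`, `evals`) go through the same `createFile` call shown above. A minimal sketch of uploading a flexible `user_data` file; the `com.openai.apis` package and the `id` property on the returned `OpenAIFile` are assumptions about the generated code:

```kotlin
import com.openai.apis.FilesApi // assumed package for the generated *Api classes
import java.io.File

val filesApi = FilesApi()
val uploaded = filesApi.createFile(
    file = File("notes.jsonl"),
    purpose = "user_data"       // newly documented purpose
)
println(uploaded.id)            // assumed field on OpenAIFile
```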
diff --git a/lib/docs/FineTuningApi.md b/lib/docs/FineTuningApi.md
index 59b0f19b..c8513373 100644
--- a/lib/docs/FineTuningApi.md
+++ b/lib/docs/FineTuningApi.md
@@ -5,7 +5,10 @@ All URIs are relative to *https://api.openai.com/v1*
| Method | HTTP request | Description |
| ------------- | ------------- | ------------- |
| [**cancelFineTuningJob**](FineTuningApi.md#cancelFineTuningJob) | **POST** /fine_tuning/jobs/{fine_tuning_job_id}/cancel | Immediately cancel a fine-tune job. |
+| [**createFineTuningCheckpointPermission**](FineTuningApi.md#createFineTuningCheckpointPermission) | **POST** /fine_tuning/checkpoints/{permission_id}/permissions | **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). This enables organization owners to share fine-tuned models with other projects in their organization. |
| [**createFineTuningJob**](FineTuningApi.md#createFineTuningJob) | **POST** /fine_tuning/jobs | Creates a fine-tuning job which begins the process of creating a new model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about fine-tuning](/docs/guides/fine-tuning) |
+| [**deleteFineTuningCheckpointPermission**](FineTuningApi.md#deleteFineTuningCheckpointPermission) | **DELETE** /fine_tuning/checkpoints/{permission_id}/permissions | **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). Organization owners can use this endpoint to delete a permission for a fine-tuned model checkpoint. |
+| [**listFineTuningCheckpointPermissions**](FineTuningApi.md#listFineTuningCheckpointPermissions) | **GET** /fine_tuning/checkpoints/{permission_id}/permissions | **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). Organization owners can use this endpoint to view all permissions for a fine-tuned model checkpoint. |
| [**listFineTuningEvents**](FineTuningApi.md#listFineTuningEvents) | **GET** /fine_tuning/jobs/{fine_tuning_job_id}/events | Get status updates for a fine-tuning job. |
| [**listFineTuningJobCheckpoints**](FineTuningApi.md#listFineTuningJobCheckpoints) | **GET** /fine_tuning/jobs/{fine_tuning_job_id}/checkpoints | List checkpoints for a fine-tuning job. |
| [**listPaginatedFineTuningJobs**](FineTuningApi.md#listPaginatedFineTuningJobs) | **GET** /fine_tuning/jobs | List your organization's fine-tuning jobs |
@@ -58,6 +61,54 @@ Configure ApiKeyAuth:
- **Content-Type**: Not defined
- **Accept**: application/json
+
+# **createFineTuningCheckpointPermission**
+> ListFineTuningCheckpointPermissionResponse createFineTuningCheckpointPermission(permissionId, createFineTuningCheckpointPermissionRequest)
+
+**NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). This enables organization owners to share fine-tuned models with other projects in their organization.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = FineTuningApi()
+val permissionId : kotlin.String = ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd // kotlin.String | The ID of the fine-tuned model checkpoint to create a permission for.
+val createFineTuningCheckpointPermissionRequest : CreateFineTuningCheckpointPermissionRequest = // CreateFineTuningCheckpointPermissionRequest |
+try {
+ val result : ListFineTuningCheckpointPermissionResponse = apiInstance.createFineTuningCheckpointPermission(permissionId, createFineTuningCheckpointPermissionRequest)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling FineTuningApi#createFineTuningCheckpointPermission")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling FineTuningApi#createFineTuningCheckpointPermission")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **permissionId** | **kotlin.String**| The ID of the fine-tuned model checkpoint to create a permission for. | |
+| **createFineTuningCheckpointPermissionRequest** | [**CreateFineTuningCheckpointPermissionRequest**](CreateFineTuningCheckpointPermissionRequest.md)| | |
+
+### Return type
+
+[**ListFineTuningCheckpointPermissionResponse**](ListFineTuningCheckpointPermissionResponse.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
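The generated example above leaves out the auth setup this endpoint depends on. Below is a minimal sketch assuming the admin key is read from an `OPENAI_ADMIN_KEY` environment variable; the `projectIds` constructor argument is only an assumption for illustration, the actual field names live in `CreateFineTuningCheckpointPermissionRequest.md`.

```kotlin
// Checkpoint-permission endpoints need an *admin* API key, not a regular project key.
ApiClient.accessToken = System.getenv("OPENAI_ADMIN_KEY")

val api = FineTuningApi()
// Hypothetical request body: `projectIds` is assumed here; see
// CreateFineTuningCheckpointPermissionRequest.md for the real fields.
val request = CreateFineTuningCheckpointPermissionRequest(projectIds = listOf("proj_abc123"))
val created = api.createFineTuningCheckpointPermission(
    "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
    request
)
println(created)
```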
# **createFineTuningJob**
> FineTuningJob createFineTuningJob(createFineTuningJobRequest)
@@ -104,6 +155,106 @@ Configure ApiKeyAuth:
- **Content-Type**: application/json
- **Accept**: application/json
+
+# **deleteFineTuningCheckpointPermission**
+> DeleteFineTuningCheckpointPermissionResponse deleteFineTuningCheckpointPermission(permissionId)
+
+**NOTE:** This endpoint requires an [admin API key](../admin-api-keys). Organization owners can use this endpoint to delete a permission for a fine-tuned model checkpoint.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = FineTuningApi()
+val permissionId : kotlin.String = "cp_zc4Q7MP6XxulcVzj4MZdwsAB" // kotlin.String | The ID of the fine-tuned model checkpoint permission to delete.
+try {
+ val result : DeleteFineTuningCheckpointPermissionResponse = apiInstance.deleteFineTuningCheckpointPermission(permissionId)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling FineTuningApi#deleteFineTuningCheckpointPermission")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling FineTuningApi#deleteFineTuningCheckpointPermission")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **permissionId** | **kotlin.String**| The ID of the fine-tuned model checkpoint permission to delete. | |
+
+### Return type
+
+[**DeleteFineTuningCheckpointPermissionResponse**](DeleteFineTuningCheckpointPermissionResponse.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+
+# **listFineTuningCheckpointPermissions**
+> ListFineTuningCheckpointPermissionResponse listFineTuningCheckpointPermissions(permissionId, projectId, after, limit, order)
+
+**NOTE:** This endpoint requires an [admin API key](../admin-api-keys). Organization owners can use this endpoint to view all permissions for a fine-tuned model checkpoint.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = FineTuningApi()
+val permissionId : kotlin.String = "ft-AF1WoRqd3aJAHsqc9NY7iL8F" // kotlin.String | The ID of the fine-tuned model checkpoint to get permissions for.
+val projectId : kotlin.String = "projectId_example" // kotlin.String | The ID of the project to get permissions for.
+val after : kotlin.String = "after_example" // kotlin.String | Identifier for the last permission ID from the previous pagination request.
+val limit : kotlin.Int = 56 // kotlin.Int | Number of permissions to retrieve.
+val order : kotlin.String = "order_example" // kotlin.String | The order in which to retrieve permissions.
+try {
+ val result : ListFineTuningCheckpointPermissionResponse = apiInstance.listFineTuningCheckpointPermissions(permissionId, projectId, after, limit, order)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling FineTuningApi#listFineTuningCheckpointPermissions")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling FineTuningApi#listFineTuningCheckpointPermissions")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **permissionId** | **kotlin.String**| The ID of the fine-tuned model checkpoint to get permissions for. | |
+| **projectId** | **kotlin.String**| The ID of the project to get permissions for. | [optional] |
+| **after** | **kotlin.String**| Identifier for the last permission ID from the previous pagination request. | [optional] |
+| **limit** | **kotlin.Int**| Number of permissions to retrieve. | [optional] [default to 10] |
+| **order** | **kotlin.String**| The order in which to retrieve permissions. | [optional] [default to descending] [enum: ascending, descending] |
+
+### Return type
+
+[**ListFineTuningCheckpointPermissionResponse**](ListFineTuningCheckpointPermissionResponse.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
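Since the list response is paginated, a typical use is to walk all permissions for a checkpoint. A rough sketch, assuming `ApiClient.accessToken` is already set to an admin key and that the optional parameters default to null; the `data`/`hasMore`/`lastId` property names come from `ListFineTuningCheckpointPermissionResponse.md` added later in this diff.

```kotlin
val api = FineTuningApi()
val checkpointId = "ft-AF1WoRqd3aJAHsqc9NY7iL8F"

var after: String? = null
do {
    // projectId, limit, and order are left at their documented defaults.
    val page = api.listFineTuningCheckpointPermissions(checkpointId, after = after)
    page.data.forEach { permission ->
        println("${permission.id} -> project ${permission.projectId} (created ${permission.createdAt})")
    }
    after = page.lastId
} while (page.hasMore)
```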
# **listFineTuningEvents**
> ListFineTuningJobEventsResponse listFineTuningEvents(fineTuningJobId, after, limit)
@@ -206,7 +357,7 @@ Configure ApiKeyAuth:
# **listPaginatedFineTuningJobs**
-> ListPaginatedFineTuningJobsResponse listPaginatedFineTuningJobs(after, limit)
+> ListPaginatedFineTuningJobsResponse listPaginatedFineTuningJobs(after, limit, metadata)
List your organization's fine-tuning jobs
@@ -219,8 +370,9 @@ List your organization's fine-tuning jobs
val apiInstance = FineTuningApi()
val after : kotlin.String = after_example // kotlin.String | Identifier for the last job from the previous pagination request.
val limit : kotlin.Int = 56 // kotlin.Int | Number of fine-tuning jobs to retrieve.
+val metadata : kotlin.collections.Map<kotlin.String, kotlin.String> = mapOf() // kotlin.collections.Map<kotlin.String, kotlin.String> | Optional metadata filter. To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to indicate no metadata.
try {
- val result : ListPaginatedFineTuningJobsResponse = apiInstance.listPaginatedFineTuningJobs(after, limit)
+ val result : ListPaginatedFineTuningJobsResponse = apiInstance.listPaginatedFineTuningJobs(after, limit, metadata)
println(result)
} catch (e: ClientException) {
println("4xx response calling FineTuningApi#listPaginatedFineTuningJobs")
@@ -233,9 +385,10 @@ try {
### Parameters
| **after** | **kotlin.String**| Identifier for the last job from the previous pagination request. | [optional] |
+| **limit** | **kotlin.Int**| Number of fine-tuning jobs to retrieve. | [optional] [default to 20] |
| Name | Type | Description | Notes |
| ------------- | ------------- | ------------- | ------------- |
-| **limit** | **kotlin.Int**| Number of fine-tuning jobs to retrieve. | [optional] [default to 20] |
+| **metadata** | [**kotlin.collections.Map<kotlin.String, kotlin.String>**](kotlin.String.md)| Optional metadata filter. To filter, use the syntax `metadata[k]=v`. Alternatively, set `metadata=null` to indicate no metadata. | [optional] |
### Return type
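The new `metadata` parameter on `listPaginatedFineTuningJobs` lets you narrow the listing to jobs tagged with particular metadata. A quick sketch, assuming the usual `data`/`id`/`status` properties on the list response and job model:

```kotlin
val api = FineTuningApi()
// Only jobs tagged with metadata key "team" = "search" are returned; the server-side
// query syntax is `metadata[team]=search`, per the parameter description above.
val jobs = api.listPaginatedFineTuningJobs(null, 20, mapOf("team" to "search"))
jobs.data.forEach { job -> println("${job.id}: ${job.status}") }
```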
diff --git a/lib/docs/FineTuningCheckpointPermission.md b/lib/docs/FineTuningCheckpointPermission.md
new file mode 100644
index 00000000..35da0ad1
--- /dev/null
+++ b/lib/docs/FineTuningCheckpointPermission.md
@@ -0,0 +1,20 @@
+
+# FineTuningCheckpointPermission
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The permission identifier, which can be referenced in the API endpoints. | |
+| **createdAt** | **kotlin.Int** | The Unix timestamp (in seconds) for when the permission was created. | |
+| **projectId** | **kotlin.String** | The project identifier that the permission is for. | |
+| **`object`** | [**inline**](#`Object`) | The object type, which is always \"checkpoint.permission\". | |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | checkpoint.permission |
+
+
+
diff --git a/lib/docs/FineTuningJob.md b/lib/docs/FineTuningJob.md
index 97678eab..dab8a3cf 100644
--- a/lib/docs/FineTuningJob.md
+++ b/lib/docs/FineTuningJob.md
@@ -22,6 +22,7 @@
| **integrations** | [**kotlin.collections.List<FineTuningJobIntegrationsInner>**](FineTuningJobIntegrationsInner.md) | A list of integrations to enable for this fine-tuning job. | [optional] |
| **estimatedFinish** | **kotlin.Int** | The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. | [optional] |
| **method** | [**FineTuneMethod**](FineTuneMethod.md) | | [optional] |
+| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
diff --git a/lib/docs/FunctionTool.md b/lib/docs/FunctionTool.md
new file mode 100644
index 00000000..d4578c2e
--- /dev/null
+++ b/lib/docs/FunctionTool.md
@@ -0,0 +1,21 @@
+
+# FunctionTool
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the function tool. Always `function`. | |
+| **name** | **kotlin.String** | The name of the function to call. | |
+| **parameters** | [**kotlin.collections.Map<kotlin.String, kotlin.Any>**](kotlin.Any.md) | A JSON schema object describing the parameters of the function. | |
+| **strict** | **kotlin.Boolean** | Whether to enforce strict parameter validation. Default `true`. | |
+| **description** | **kotlin.String** | A description of the function. Used by the model to determine whether or not to call the function. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | function |
+
+
+
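The table above describes a Responses API function tool. As a sanity check of the field layout, here is the equivalent JSON shape built with plain Kotlin maps rather than the generated class; the weather function name and parameters are made up for illustration.

```kotlin
// A function tool definition, expressed as plain maps mirroring the fields in the table above.
val functionTool = mapOf(
    "type" to "function",
    "name" to "get_weather",                       // illustrative name
    "description" to "Look up the current weather for a city.",
    "strict" to true,                              // enforce strict parameter validation
    "parameters" to mapOf(                         // a JSON Schema object
        "type" to "object",
        "properties" to mapOf(
            "city" to mapOf("type" to "string")
        ),
        "required" to listOf("city"),
        "additionalProperties" to false
    )
)
println(functionTool)
```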
diff --git a/lib/docs/FunctionToolCall.md b/lib/docs/FunctionToolCall.md
new file mode 100644
index 00000000..a37b045b
--- /dev/null
+++ b/lib/docs/FunctionToolCall.md
@@ -0,0 +1,29 @@
+
+# FunctionToolCall
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the function tool call. Always `function_call`. | |
+| **callId** | **kotlin.String** | The unique ID of the function tool call generated by the model. | |
+| **name** | **kotlin.String** | The name of the function to run. | |
+| **arguments** | **kotlin.String** | A JSON string of the arguments to pass to the function. | |
+| **id** | **kotlin.String** | The unique ID of the function tool call. | [optional] |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | function_call |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/FunctionToolCallOutput.md b/lib/docs/FunctionToolCallOutput.md
new file mode 100644
index 00000000..11a4d67b
--- /dev/null
+++ b/lib/docs/FunctionToolCallOutput.md
@@ -0,0 +1,28 @@
+
+# FunctionToolCallOutput
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the function tool call output. Always `function_call_output`. | |
+| **callId** | **kotlin.String** | The unique ID of the function tool call generated by the model. | |
+| **output** | **kotlin.String** | A JSON string of the output of the function tool call. | |
+| **id** | **kotlin.String** | The unique ID of the function tool call output. Populated when this item is returned via API. | [optional] |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | function_call_output |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
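`FunctionToolCall` and `FunctionToolCallOutput` are linked by `callId`: the output your application sends back must echo the `call_id` the model generated. A minimal sketch using plain maps instead of the generated classes; the function name, arguments, and output are illustrative.

```kotlin
// What the model emits (a function_call item) ...
val functionCall = mapOf(
    "type" to "function_call",
    "call_id" to "call_abc123",            // generated by the model
    "name" to "get_weather",
    "arguments" to """{"city":"Paris"}"""  // JSON string of arguments
)

// ... and what the application sends back: the output must reuse the same call_id.
val functionCallOutput = mapOf(
    "type" to "function_call_output",
    "call_id" to functionCall["call_id"],
    "output" to """{"temperature_c":21}""" // JSON string of the function result
)
println(functionCallOutput)
```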
diff --git a/lib/docs/FunctionToolCallOutputResource.md b/lib/docs/FunctionToolCallOutputResource.md
new file mode 100644
index 00000000..7c8ec391
--- /dev/null
+++ b/lib/docs/FunctionToolCallOutputResource.md
@@ -0,0 +1,28 @@
+
+# FunctionToolCallOutputResource
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The unique ID of the function call tool output. | |
+| **type** | [**inline**](#Type) | The type of the function tool call output. Always `function_call_output`. | |
+| **callId** | **kotlin.String** | The unique ID of the function tool call generated by the model. | |
+| **output** | **kotlin.String** | A JSON string of the output of the function tool call. | |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | function_call_output |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/FunctionToolCallResource.md b/lib/docs/FunctionToolCallResource.md
new file mode 100644
index 00000000..77c87c04
--- /dev/null
+++ b/lib/docs/FunctionToolCallResource.md
@@ -0,0 +1,29 @@
+
+# FunctionToolCallResource
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The unique ID of the function tool call. | |
+| **type** | [**inline**](#Type) | The type of the function tool call. Always `function_call`. | |
+| **callId** | **kotlin.String** | The unique ID of the function tool call generated by the model. | |
+| **name** | **kotlin.String** | The name of the function to run. | |
+| **arguments** | **kotlin.String** | A JSON string of the arguments to pass to the function. | |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | function_call |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/Includable.md b/lib/docs/Includable.md
new file mode 100644
index 00000000..434bdd2c
--- /dev/null
+++ b/lib/docs/Includable.md
@@ -0,0 +1,14 @@
+
+# Includable
+
+## Enum
+
+
+ * `file_search_callPeriodResults` (value: `"file_search_call.results"`)
+
+ * `messagePeriodInput_imagePeriodImage_url` (value: `"message.input_image.image_url"`)
+
+ * `computer_call_outputPeriodOutputPeriodImage_url` (value: `"computer_call_output.output.image_url"`)
+
+
+
diff --git a/lib/docs/InputAudio.md b/lib/docs/InputAudio.md
new file mode 100644
index 00000000..42b55d4e
--- /dev/null
+++ b/lib/docs/InputAudio.md
@@ -0,0 +1,26 @@
+
+# InputAudio
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the input item. Always `input_audio`. | |
+| **`data`** | **kotlin.String** | Base64-encoded audio data. | |
+| **format** | [**inline**](#Format) | The format of the audio data. Currently supported formats are `mp3` and `wav`. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | input_audio |
+
+
+
+## Enum: format
+| Name | Value |
+| ---- | ----- |
+| format | mp3, wav |
+
+
+
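The `data` field of an `input_audio` part carries base64-encoded bytes in one of the two supported formats. A small sketch using `java.util.Base64` on an in-memory byte array (loading a real wav file is left out); the helper name is just for illustration.

```kotlin
import java.util.Base64

fun toInputAudio(wavBytes: ByteArray): Map<String, String> =
    mapOf(
        "type" to "input_audio",
        "data" to Base64.getEncoder().encodeToString(wavBytes), // Base64-encoded audio data
        "format" to "wav"                                       // `mp3` and `wav` are supported
    )

fun main() {
    val fakeAudio = ByteArray(16) { it.toByte() } // placeholder bytes, not real audio
    println(toInputAudio(fakeAudio))
}
```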
diff --git a/lib/docs/InputContent.md b/lib/docs/InputContent.md
new file mode 100644
index 00000000..6bd4dad5
--- /dev/null
+++ b/lib/docs/InputContent.md
@@ -0,0 +1,30 @@
+
+# InputContent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the input item. Always `input_text`. | |
+| **text** | **kotlin.String** | The text input to the model. | |
+| **detail** | [**inline**](#Detail) | The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`. | |
+| **imageUrl** | **kotlin.String** | The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL. | [optional] |
+| **fileId** | **kotlin.String** | The ID of the file to be sent to the model. | [optional] |
+| **filename** | **kotlin.String** | The name of the file to be sent to the model. | [optional] |
+| **fileData** | **kotlin.String** | The content of the file to be sent to the model. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | input_text, input_image, input_file |
+
+
+
+## Enum: detail
+| Name | Value |
+| ---- | ----- |
+| detail | high, low, auto |
+
+
+
diff --git a/lib/docs/InputFile.md b/lib/docs/InputFile.md
new file mode 100644
index 00000000..68ac0208
--- /dev/null
+++ b/lib/docs/InputFile.md
@@ -0,0 +1,20 @@
+
+# InputFile
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the input item. Always `input_file`. | |
+| **fileId** | **kotlin.String** | The ID of the file to be sent to the model. | [optional] |
+| **filename** | **kotlin.String** | The name of the file to be sent to the model. | [optional] |
+| **fileData** | **kotlin.String** | The content of the file to be sent to the model. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | input_file |
+
+
+
diff --git a/lib/docs/InputImage.md b/lib/docs/InputImage.md
new file mode 100644
index 00000000..a42573d8
--- /dev/null
+++ b/lib/docs/InputImage.md
@@ -0,0 +1,27 @@
+
+# InputImage
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the input item. Always `input_image`. | |
+| **detail** | [**inline**](#Detail) | The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`. | |
+| **imageUrl** | **kotlin.String** | The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL. | [optional] |
+| **fileId** | **kotlin.String** | The ID of the file to be sent to the model. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | input_image |
+
+
+
+## Enum: detail
+| Name | Value |
+| ---- | ----- |
+| detail | high, low, auto |
+
+
+
diff --git a/lib/docs/InputItem.md b/lib/docs/InputItem.md
new file mode 100644
index 00000000..81ca6496
--- /dev/null
+++ b/lib/docs/InputItem.md
@@ -0,0 +1,27 @@
+
+# InputItem
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **role** | [**inline**](#Role) | The role of the message input. One of `user`, `assistant`, `system`, or `developer`. | |
+| **content** | [**EasyInputMessageContent**](EasyInputMessageContent.md) | | |
+| **type** | [**inline**](#Type) | The type of the message input. Always `message`. | |
+| **id** | **kotlin.String** | The ID of the item to reference. | |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | user, assistant, system, developer |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | message, item_reference |
+
+
+
diff --git a/lib/docs/InputMessage.md b/lib/docs/InputMessage.md
new file mode 100644
index 00000000..9b1dff79
--- /dev/null
+++ b/lib/docs/InputMessage.md
@@ -0,0 +1,34 @@
+
+# InputMessage
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **role** | [**inline**](#Role) | The role of the message input. One of `user`, `system`, or `developer`. | |
+| **content** | [**kotlin.collections.List<InputContent>**](InputContent.md) | A list of one or many input items to the model, containing different content types. | |
+| **type** | [**inline**](#Type) | The type of the message input. Always set to `message`. | [optional] |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | [optional] |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | user, system, developer |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | message |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
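An `InputMessage` combines a role with a list of content parts. A hedged sketch of a user message mixing an `input_text` and an `input_image` part, written as plain maps mirroring the InputMessage and InputContent tables; the image URL is illustrative.

```kotlin
val userMessage = mapOf(
    "type" to "message",
    "role" to "user",
    "content" to listOf(
        mapOf(
            "type" to "input_text",
            "text" to "What is in this image?"
        ),
        mapOf(
            "type" to "input_image",
            "detail" to "auto",                          // high, low, or auto
            "image_url" to "https://example.com/cat.png" // illustrative URL
        )
    )
)
println(userMessage)
```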
diff --git a/lib/docs/InputMessageResource.md b/lib/docs/InputMessageResource.md
new file mode 100644
index 00000000..eaad7ca7
--- /dev/null
+++ b/lib/docs/InputMessageResource.md
@@ -0,0 +1,35 @@
+
+# InputMessageResource
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **role** | [**inline**](#Role) | The role of the message input. One of `user`, `system`, or `developer`. | |
+| **content** | [**kotlin.collections.List<InputContent>**](InputContent.md) | A list of one or many input items to the model, containing different content types. | |
+| **id** | **kotlin.String** | The unique ID of the message input. | |
+| **type** | [**inline**](#Type) | The type of the message input. Always set to `message`. | [optional] |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | [optional] |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | user, system, developer |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | message |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/InputText.md b/lib/docs/InputText.md
new file mode 100644
index 00000000..8a4a036d
--- /dev/null
+++ b/lib/docs/InputText.md
@@ -0,0 +1,18 @@
+
+# InputText
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the input item. Always `input_text`. | |
+| **text** | **kotlin.String** | The text input to the model. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | input_text |
+
+
+
diff --git a/lib/docs/Item.md b/lib/docs/Item.md
new file mode 100644
index 00000000..dcf7d3f5
--- /dev/null
+++ b/lib/docs/Item.md
@@ -0,0 +1,45 @@
+
+# Item
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the message input. Always set to `message`. | |
+| **role** | [**inline**](#Role) | The role of the output message. Always `assistant`. | |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | |
+| **content** | [**kotlin.collections.List<OutputContent>**](OutputContent.md) | The content of the output message. | |
+| **id** | **kotlin.String** | The unique identifier of the reasoning content. | |
+| **queries** | **kotlin.collections.List<kotlin.String>** | The queries used to search for files. | |
+| **callId** | **kotlin.String** | The unique ID of the function tool call generated by the model. | |
+| **action** | [**ComputerAction**](ComputerAction.md) | | |
+| **pendingSafetyChecks** | [**kotlin.collections.List<ComputerToolCallSafetyCheck>**](ComputerToolCallSafetyCheck.md) | The pending safety checks for the computer call. | |
+| **output** | **kotlin.String** | A JSON string of the output of the function tool call. | |
+| **name** | **kotlin.String** | The name of the function to run. | |
+| **arguments** | **kotlin.String** | A JSON string of the arguments to pass to the function. | |
+| **summary** | [**kotlin.collections.List<ReasoningItemSummaryInner>**](ReasoningItemSummaryInner.md) | Reasoning text contents. | |
+| **results** | [**kotlin.collections.List<FileSearchToolCallResultsInner>**](FileSearchToolCallResultsInner.md) | The results of the file search tool call. | [optional] |
+| **acknowledgedSafetyChecks** | [**kotlin.collections.List<ComputerToolCallSafetyCheck>**](ComputerToolCallSafetyCheck.md) | The safety checks reported by the API that have been acknowledged by the developer. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | message, file_search_call, computer_call, computer_call_output, web_search_call, function_call, function_call_output, reasoning |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | assistant |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/ItemReference.md b/lib/docs/ItemReference.md
new file mode 100644
index 00000000..89ff1171
--- /dev/null
+++ b/lib/docs/ItemReference.md
@@ -0,0 +1,18 @@
+
+# ItemReference
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The ID of the item to reference. | |
+| **type** | [**inline**](#Type) | The type of item to reference. Always `item_reference`. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | item_reference |
+
+
+
diff --git a/lib/docs/ItemResource.md b/lib/docs/ItemResource.md
new file mode 100644
index 00000000..7cafcedb
--- /dev/null
+++ b/lib/docs/ItemResource.md
@@ -0,0 +1,44 @@
+
+# ItemResource
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the message input. Always set to `message`. | |
+| **role** | [**inline**](#Role) | The role of the output message. Always `assistant`. | |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | |
+| **content** | [**kotlin.collections.List<OutputContent>**](OutputContent.md) | The content of the output message. | |
+| **id** | **kotlin.String** | The unique ID of the function call tool output. | |
+| **queries** | **kotlin.collections.List<kotlin.String>** | The queries used to search for files. | |
+| **callId** | **kotlin.String** | The unique ID of the function tool call generated by the model. | |
+| **action** | [**ComputerAction**](ComputerAction.md) | | |
+| **pendingSafetyChecks** | [**kotlin.collections.List<ComputerToolCallSafetyCheck>**](ComputerToolCallSafetyCheck.md) | The pending safety checks for the computer call. | |
+| **output** | **kotlin.String** | A JSON string of the output of the function tool call. | |
+| **name** | **kotlin.String** | The name of the function to run. | |
+| **arguments** | **kotlin.String** | A JSON string of the arguments to pass to the function. | |
+| **results** | [**kotlin.collections.List<FileSearchToolCallResultsInner>**](FileSearchToolCallResultsInner.md) | The results of the file search tool call. | [optional] |
+| **acknowledgedSafetyChecks** | [**kotlin.collections.List<ComputerToolCallSafetyCheck>**](ComputerToolCallSafetyCheck.md) | The safety checks reported by the API that have been acknowledged by the developer. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | message, file_search_call, computer_call, computer_call_output, web_search_call, function_call, function_call_output |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | assistant |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/ResponseFormatJsonSchemaJsonSchema.md b/lib/docs/JSONSchema.md
similarity index 74%
rename from lib/docs/ResponseFormatJsonSchemaJsonSchema.md
rename to lib/docs/JSONSchema.md
index 9905ae22..b1001a66 100644
--- a/lib/docs/ResponseFormatJsonSchemaJsonSchema.md
+++ b/lib/docs/JSONSchema.md
@@ -1,13 +1,13 @@
-# ResponseFormatJsonSchemaJsonSchema
+# JSONSchema
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **name** | **kotlin.String** | The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | |
-| **description** | **kotlin.String** | A description of what the response format is for, used by the model to determine how to respond in the format. | [optional] |
-| **schema** | [**kotlin.collections.Map<kotlin.String, kotlin.Any>**](kotlin.Any.md) | The schema for the response format, described as a JSON Schema object. | [optional] |
-| **strict** | **kotlin.Boolean** | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). | [optional] |
+| **name** | **kotlin.String** | The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | |
+| **description** | **kotlin.String** | A description of what the response format is for, used by the model to determine how to respond in the format. | [optional] |
+| **schema** | [**kotlin.collections.Map<kotlin.String, kotlin.Any>**](kotlin.Any.md) | The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas [here](https://json-schema.org/). | [optional] |
+| **strict** | **kotlin.Boolean** | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). | [optional] |
diff --git a/lib/docs/KeyPress.md b/lib/docs/KeyPress.md
new file mode 100644
index 00000000..6f7136c5
--- /dev/null
+++ b/lib/docs/KeyPress.md
@@ -0,0 +1,18 @@
+
+# KeyPress
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a keypress action, this property is always set to `keypress`. | |
+| **propertyKeys** | **kotlin.collections.List<kotlin.String>** | The combination of keys the model is requesting to be pressed. This is an array of strings, each representing a key. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | keypress |
+
+
+
diff --git a/lib/docs/ListFineTuningCheckpointPermissionResponse.md b/lib/docs/ListFineTuningCheckpointPermissionResponse.md
new file mode 100644
index 00000000..1a8a043e
--- /dev/null
+++ b/lib/docs/ListFineTuningCheckpointPermissionResponse.md
@@ -0,0 +1,21 @@
+
+# ListFineTuningCheckpointPermissionResponse
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **`data`** | [**kotlin.collections.List<FineTuningCheckpointPermission>**](FineTuningCheckpointPermission.md) | | |
+| **`object`** | [**inline**](#`Object`) | | |
+| **hasMore** | **kotlin.Boolean** | | |
+| **firstId** | **kotlin.String** | | [optional] |
+| **lastId** | **kotlin.String** | | [optional] |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | list |
+
+
+
diff --git a/lib/docs/LogProb.md b/lib/docs/LogProb.md
new file mode 100644
index 00000000..cde7e7a1
--- /dev/null
+++ b/lib/docs/LogProb.md
@@ -0,0 +1,13 @@
+
+# LogProb
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **token** | **kotlin.String** | The token that was used to generate the log probability. | |
+| **logprob** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The log probability of the token. | |
+| **bytes** | **kotlin.collections.List<kotlin.Int>** | The bytes that were used to generate the log probability. | |
+| **topLogprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | | [optional] |
+
+
+
diff --git a/lib/docs/LogProbProperties.md b/lib/docs/LogProbProperties.md
new file mode 100644
index 00000000..1d799587
--- /dev/null
+++ b/lib/docs/LogProbProperties.md
@@ -0,0 +1,12 @@
+
+# LogProbProperties
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **token** | **kotlin.String** | The token that was used to generate the log probability. | |
+| **logprob** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The log probability of the token. | |
+| **bytes** | **kotlin.collections.List<kotlin.Int>** | The bytes that were used to generate the log probability. | |
+
+
+
diff --git a/lib/docs/RealtimeSessionModel.md b/lib/docs/ModelIds.md
similarity index 84%
rename from lib/docs/RealtimeSessionModel.md
rename to lib/docs/ModelIds.md
index fd210649..354db8f7 100644
--- a/lib/docs/RealtimeSessionModel.md
+++ b/lib/docs/ModelIds.md
@@ -1,5 +1,5 @@
-# RealtimeSessionModel
+# ModelIds
## Properties
| Name | Type | Description | Notes |
diff --git a/lib/docs/ModelIdsResponses.md b/lib/docs/ModelIdsResponses.md
new file mode 100644
index 00000000..04b727f5
--- /dev/null
+++ b/lib/docs/ModelIdsResponses.md
@@ -0,0 +1,9 @@
+
+# ModelIdsResponses
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+
+
+
diff --git a/lib/docs/ModelIdsShared.md b/lib/docs/ModelIdsShared.md
new file mode 100644
index 00000000..e04afee5
--- /dev/null
+++ b/lib/docs/ModelIdsShared.md
@@ -0,0 +1,9 @@
+
+# ModelIdsShared
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+
+
+
diff --git a/lib/docs/ModelResponseProperties.md b/lib/docs/ModelResponseProperties.md
new file mode 100644
index 00000000..fc347b19
--- /dev/null
+++ b/lib/docs/ModelResponseProperties.md
@@ -0,0 +1,13 @@
+
+# ModelResponseProperties
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
+| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] |
+| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] |
+| **user** | **kotlin.String** | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] |
+
+
+
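The `metadata` description above spells out hard limits: at most 16 pairs, keys up to 64 characters, values up to 512. A small helper that validates a map against those limits before sending a request; the function name is just a local convenience.

```kotlin
// Validates a metadata map against the documented limits:
// at most 16 pairs, keys <= 64 chars, values <= 512 chars.
fun requireValidMetadata(metadata: Map<String, String>) {
    require(metadata.size <= 16) { "metadata allows at most 16 key-value pairs" }
    metadata.forEach { (key, value) ->
        require(key.length <= 64) { "metadata key '$key' exceeds 64 characters" }
        require(value.length <= 512) { "metadata value for '$key' exceeds 512 characters" }
    }
}

fun main() {
    requireValidMetadata(mapOf("team" to "search", "run" to "2025-03-28")) // passes
}
```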
diff --git a/lib/docs/ModifyAssistantRequest.md b/lib/docs/ModifyAssistantRequest.md
index 594fdd8f..80001c99 100644
--- a/lib/docs/ModifyAssistantRequest.md
+++ b/lib/docs/ModifyAssistantRequest.md
@@ -14,7 +14,7 @@
| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. | [optional] |
| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. | [optional] |
-| **responseFormat** | [**AssistantObjectResponseFormat**](AssistantObjectResponseFormat.md) | | [optional] |
+| **responseFormat** | [**AssistantsApiResponseFormatOption**](AssistantsApiResponseFormatOption.md) | | [optional] |
diff --git a/lib/docs/Move.md b/lib/docs/Move.md
new file mode 100644
index 00000000..1a5c224c
--- /dev/null
+++ b/lib/docs/Move.md
@@ -0,0 +1,19 @@
+
+# Move
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a move action, this property is always set to `move`. | |
+| **x** | **kotlin.Int** | The x-coordinate to move to. | |
+| **y** | **kotlin.Int** | The y-coordinate to move to. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | move |
+
+
+
diff --git a/lib/docs/OpenAIFile.md b/lib/docs/OpenAIFile.md
index c605d54b..b2f9cbc8 100644
--- a/lib/docs/OpenAIFile.md
+++ b/lib/docs/OpenAIFile.md
@@ -11,6 +11,7 @@
| **`object`** | [**inline**](#`Object`) | The object type, which is always `file`. | |
| **purpose** | [**inline**](#Purpose) | The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. | |
| **status** | [**inline**](#Status) | Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. | |
+| **expiresAt** | **kotlin.Int** | The Unix timestamp (in seconds) for when the file will expire. | [optional] |
| **statusDetails** | **kotlin.String** | Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. | [optional] |
diff --git a/lib/docs/OutputAudio.md b/lib/docs/OutputAudio.md
new file mode 100644
index 00000000..5f23d1a5
--- /dev/null
+++ b/lib/docs/OutputAudio.md
@@ -0,0 +1,19 @@
+
+# OutputAudio
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the output audio. Always `output_audio`. | |
+| **`data`** | **kotlin.String** | Base64-encoded audio data from the model. | |
+| **transcript** | **kotlin.String** | The transcript of the audio data from the model. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | output_audio |
+
+
+
diff --git a/lib/docs/OutputContent.md b/lib/docs/OutputContent.md
new file mode 100644
index 00000000..45e0b5d6
--- /dev/null
+++ b/lib/docs/OutputContent.md
@@ -0,0 +1,20 @@
+
+# OutputContent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the output text. Always `output_text`. | |
+| **text** | **kotlin.String** | The text output from the model. | |
+| **annotations** | [**kotlin.collections.List<Annotation>**](Annotation.md) | The annotations of the text output. | |
+| **refusal** | **kotlin.String** | The refusal explanation from the model. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | output_text, refusal |
+
+
+
diff --git a/lib/docs/OutputItem.md b/lib/docs/OutputItem.md
new file mode 100644
index 00000000..f9cc70fc
--- /dev/null
+++ b/lib/docs/OutputItem.md
@@ -0,0 +1,43 @@
+
+# OutputItem
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The unique identifier of the reasoning content. | |
+| **type** | [**inline**](#Type) | The type of the output message. Always `message`. | |
+| **role** | [**inline**](#Role) | The role of the output message. Always `assistant`. | |
+| **content** | [**kotlin.collections.List<OutputContent>**](OutputContent.md) | The content of the output message. | |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | |
+| **queries** | **kotlin.collections.List<kotlin.String>** | The queries used to search for files. | |
+| **callId** | **kotlin.String** | An identifier used when responding to the tool call with output. | |
+| **name** | **kotlin.String** | The name of the function to run. | |
+| **arguments** | **kotlin.String** | A JSON string of the arguments to pass to the function. | |
+| **action** | [**ComputerAction**](ComputerAction.md) | | |
+| **pendingSafetyChecks** | [**kotlin.collections.List<ComputerToolCallSafetyCheck>**](ComputerToolCallSafetyCheck.md) | The pending safety checks for the computer call. | |
+| **summary** | [**kotlin.collections.List<ReasoningItemSummaryInner>**](ReasoningItemSummaryInner.md) | Reasoning text contents. | |
+| **results** | [**kotlin.collections.List<FileSearchToolCallResultsInner>**](FileSearchToolCallResultsInner.md) | The results of the file search tool call. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | message, file_search_call, function_call, web_search_call, computer_call, reasoning |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | assistant |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/OutputMessage.md b/lib/docs/OutputMessage.md
new file mode 100644
index 00000000..ddee120a
--- /dev/null
+++ b/lib/docs/OutputMessage.md
@@ -0,0 +1,35 @@
+
+# OutputMessage
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The unique ID of the output message. | |
+| **type** | [**inline**](#Type) | The type of the output message. Always `message`. | |
+| **role** | [**inline**](#Role) | The role of the output message. Always `assistant`. | |
+| **content** | [**kotlin.collections.List<OutputContent>**](OutputContent.md) | The content of the output message. | |
+| **status** | [**inline**](#Status) | The status of the message input. One of `in_progress`, `completed`, or `incomplete`. Populated when input items are returned via API. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | message |
+
+
+
+## Enum: role
+| Name | Value |
+| ---- | ----- |
+| role | assistant |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/OutputText.md b/lib/docs/OutputText.md
new file mode 100644
index 00000000..5f3403c6
--- /dev/null
+++ b/lib/docs/OutputText.md
@@ -0,0 +1,19 @@
+
+# OutputText
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the output text. Always `output_text`. | |
+| **text** | **kotlin.String** | The text output from the model. | |
+| **annotations** | [**kotlin.collections.List<Annotation>**](Annotation.md) | The annotations of the text output. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | output_text |
+
+
+
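Model text comes back as `output_text` parts inside `message` output items, per the OutputMessage/OutputText tables above. A rough sketch of collecting that text, using plain maps shaped like those tables rather than the generated classes:

```kotlin
fun collectOutputText(output: List<Map<String, Any?>>): String {
    val texts = mutableListOf<String>()
    for (item in output) {
        if (item["type"] != "message") continue
        val content = item["content"] as? List<*> ?: continue
        for (part in content) {
            val partMap = part as? Map<*, *> ?: continue
            if (partMap["type"] == "output_text") texts += partMap["text"].toString()
        }
    }
    return texts.joinToString("\n")
}

fun main() {
    val output = listOf(
        mapOf(
            "type" to "message",
            "role" to "assistant",
            "content" to listOf(
                mapOf("type" to "output_text", "text" to "Hello!", "annotations" to emptyList<Any>())
            )
        )
    )
    println(collectOutputText(output)) // prints: Hello!
}
```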
diff --git a/lib/docs/RealtimeApi.md b/lib/docs/RealtimeApi.md
index 0115b3c2..2a5dd5b3 100644
--- a/lib/docs/RealtimeApi.md
+++ b/lib/docs/RealtimeApi.md
@@ -5,6 +5,7 @@ All URIs are relative to *https://api.openai.com/v1*
| Method | HTTP request | Description |
| ------------- | ------------- | ------------- |
| [**createRealtimeSession**](RealtimeApi.md#createRealtimeSession) | **POST** /realtime/sessions | Create an ephemeral API token for use in client-side applications with the Realtime API. Can be configured with the same session parameters as the `session.update` client event. It responds with a session object, plus a `client_secret` key which contains a usable ephemeral API token that can be used to authenticate browser clients for the Realtime API. |
+| [**createRealtimeTranscriptionSession**](RealtimeApi.md#createRealtimeTranscriptionSession) | **POST** /realtime/transcription_sessions | Create an ephemeral API token for use in client-side applications with the Realtime API specifically for realtime transcriptions. Can be configured with the same session parameters as the `transcription_session.update` client event. It responds with a session object, plus a `client_secret` key which contains a usable ephemeral API token that can be used to authenticate browser clients for the Realtime API. |
@@ -45,6 +46,52 @@ try {
### Authorization
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+
+# **createRealtimeTranscriptionSession**
+> RealtimeTranscriptionSessionCreateResponse createRealtimeTranscriptionSession(realtimeTranscriptionSessionCreateRequest)
+
+Create an ephemeral API token for use in client-side applications with the Realtime API specifically for realtime transcriptions. Can be configured with the same session parameters as the `transcription_session.update` client event. It responds with a session object, plus a `client_secret` key which contains a usable ephemeral API token that can be used to authenticate browser clients for the Realtime API.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = RealtimeApi()
+val realtimeTranscriptionSessionCreateRequest : RealtimeTranscriptionSessionCreateRequest = // RealtimeTranscriptionSessionCreateRequest | Create an ephemeral API key with the given session configuration.
+try {
+ val result : RealtimeTranscriptionSessionCreateResponse = apiInstance.createRealtimeTranscriptionSession(realtimeTranscriptionSessionCreateRequest)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling RealtimeApi#createRealtimeTranscriptionSession")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling RealtimeApi#createRealtimeTranscriptionSession")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **realtimeTranscriptionSessionCreateRequest** | [**RealtimeTranscriptionSessionCreateRequest**](RealtimeTranscriptionSessionCreateRequest.md)| Create an ephemeral API key with the given session configuration. | |
+
+### Return type
+
+[**RealtimeTranscriptionSessionCreateResponse**](RealtimeTranscriptionSessionCreateResponse.md)
+
+### Authorization
+
+
Configure ApiKeyAuth:
ApiClient.accessToken = ""
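The new `createRealtimeTranscriptionSession` endpoint is meant to be called server-side so a browser never sees the real API key. A minimal sketch of that flow; the no-argument request construction is a hypothetical placeholder (the real fields are in `RealtimeTranscriptionSessionCreateRequest.md`).

```kotlin
// Server-side: mint an ephemeral token with the project API key.
ApiClient.accessToken = System.getenv("OPENAI_API_KEY")

val api = RealtimeApi()
// Hypothetical request construction; see RealtimeTranscriptionSessionCreateRequest.md for the real fields.
val request = RealtimeTranscriptionSessionCreateRequest()
val session = api.createRealtimeTranscriptionSession(request)

// Per the endpoint description, the response includes a `client_secret` ephemeral key
// that the browser client then uses to authenticate against the Realtime API.
println(session)
```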
diff --git a/lib/docs/RealtimeClientEvent.md b/lib/docs/RealtimeClientEvent.md
new file mode 100644
index 00000000..b084915a
--- /dev/null
+++ b/lib/docs/RealtimeClientEvent.md
@@ -0,0 +1,27 @@
+
+# RealtimeClientEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The event type, must be `conversation.item.create`. | |
+| **item** | [**RealtimeConversationItem**](RealtimeConversationItem.md) | | |
+| **itemId** | **kotlin.String** | The ID of the assistant message item to truncate. Only assistant message items can be truncated. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part to truncate. Set this to 0. | |
+| **audioEndMs** | **kotlin.Int** | Inclusive duration up to which audio is truncated, in milliseconds. If the audio_end_ms is greater than the actual audio duration, the server will respond with an error. | |
+| **audio** | **kotlin.String** | Base64-encoded audio bytes. This must be in the format specified by the `input_audio_format` field in the session configuration. | |
+| **session** | [**RealtimeTranscriptionSessionCreateRequest**](RealtimeTranscriptionSessionCreateRequest.md) | | |
+| **eventId** | **kotlin.String** | Optional client-generated ID used to identify this event. | [optional] |
+| **previousItemId** | **kotlin.String** | The ID of the preceding item after which the new item will be inserted. If not set, the new item will be appended to the end of the conversation. If set to `root`, the new item will be added to the beginning of the conversation. If set to an existing ID, it allows an item to be inserted mid-conversation. If the ID cannot be found, an error will be returned and the item will not be added. | [optional] |
+| **responseId** | **kotlin.String** | A specific response ID to cancel - if not provided, will cancel an in-progress response in the default conversation. | [optional] |
+| **response** | [**RealtimeResponseCreateParams**](RealtimeResponseCreateParams.md) | | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | conversation.item.create, conversation.item.delete, conversation.item.retrieve, conversation.item.truncate, input_audio_buffer.append, input_audio_buffer.clear, input_audio_buffer.commit, response.cancel, response.create, session.update, transcription_session.update |
+
+
+
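`RealtimeClientEvent` folds every client event into one model; the field list above is the union of them all. As a concrete instance, here is a `conversation.item.truncate` event as the plain payload sent over the realtime socket, using only fields from the table; the IDs and duration are illustrative.

```kotlin
// A conversation.item.truncate client event, as it would be sent over the realtime connection.
val truncateEvent = mapOf(
    "type" to "conversation.item.truncate",
    "event_id" to "evt_123",   // optional client-generated ID
    "item_id" to "item_456",   // assistant message item to truncate
    "content_index" to 0,      // the docs say to set this to 0
    "audio_end_ms" to 1500     // truncate audio after 1.5 seconds
)
println(truncateEvent)
```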
diff --git a/lib/docs/RealtimeClientEventConversationItemRetrieve.md b/lib/docs/RealtimeClientEventConversationItemRetrieve.md
new file mode 100644
index 00000000..3004b266
--- /dev/null
+++ b/lib/docs/RealtimeClientEventConversationItemRetrieve.md
@@ -0,0 +1,19 @@
+
+# RealtimeClientEventConversationItemRetrieve
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The event type, must be `conversation.item.retrieve`. | |
+| **itemId** | **kotlin.String** | The ID of the item to retrieve. | |
+| **eventId** | **kotlin.String** | Optional client-generated ID used to identify this event. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | conversation.item.retrieve |
+
+
+
diff --git a/lib/docs/RealtimeClientEventTranscriptionSessionUpdate.md b/lib/docs/RealtimeClientEventTranscriptionSessionUpdate.md
new file mode 100644
index 00000000..f83c497d
--- /dev/null
+++ b/lib/docs/RealtimeClientEventTranscriptionSessionUpdate.md
@@ -0,0 +1,19 @@
+
+# RealtimeClientEventTranscriptionSessionUpdate
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The event type, must be `transcription_session.update`. | |
+| **session** | [**RealtimeTranscriptionSessionCreateRequest**](RealtimeTranscriptionSessionCreateRequest.md) | | |
+| **eventId** | **kotlin.String** | Optional client-generated ID used to identify this event. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | transcription_session.update |
+
+
+
diff --git a/lib/docs/RealtimeResponse.md b/lib/docs/RealtimeResponse.md
index b49423a6..ac6a123e 100644
--- a/lib/docs/RealtimeResponse.md
+++ b/lib/docs/RealtimeResponse.md
@@ -12,7 +12,7 @@
| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | [optional] |
| **usage** | [**RealtimeResponseUsage**](RealtimeResponseUsage.md) | | [optional] |
| **conversationId** | **kotlin.String** | Which conversation the response is added to, determined by the `conversation` field in the `response.create` event. If `auto`, the response will be added to the default conversation and the value of `conversation_id` will be an id like `conv_1234`. If `none`, the response will not be added to any conversation and the value of `conversation_id` will be `null`. If responses are being triggered by server VAD, the response will be added to the default conversation, thus the `conversation_id` will be an id like `conv_1234`. | [optional] |
-| **voice** | [**inline**](#Voice) | The voice the model used to respond. Current voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. | [optional] |
+| **voice** | [**VoiceIdsShared**](VoiceIdsShared.md) | | [optional] |
| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | The set of modalities the model used to respond. If there are multiple modalities, the model will pick one, for example if `modalities` is `[\"text\", \"audio\"]`, the model could be responding in either text or audio. | [optional] |
| **outputAudioFormat** | [**inline**](#OutputAudioFormat) | The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. | [optional] |
| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. | [optional] |
@@ -33,13 +33,6 @@
| status | completed, cancelled, failed, incomplete |
-
-## Enum: voice
-| Name | Value |
-| ---- | ----- |
-| voice | alloy, ash, ballad, coral, echo, sage, shimmer, verse |
-
-
## Enum: modalities
| Name | Value |
diff --git a/lib/docs/RealtimeResponseCreateParams.md b/lib/docs/RealtimeResponseCreateParams.md
index 2d0a0725..59a66da8 100644
--- a/lib/docs/RealtimeResponseCreateParams.md
+++ b/lib/docs/RealtimeResponseCreateParams.md
@@ -6,7 +6,7 @@
| ------------ | ------------- | ------------- | ------------- |
| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | The set of modalities the model can respond with. To disable audio, set this to [\"text\"]. | [optional] |
| **instructions** | **kotlin.String** | The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model can be instructed on response content and format, (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The instructions are not guaranteed to be followed by the model, but they provide guidance to the model on the desired behavior. Note that the server sets default instructions which will be used if this field is not set and are visible in the `session.created` event at the start of the session. | [optional] |
-| **voice** | [**inline**](#Voice) | The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. | [optional] |
+| **voice** | [**VoiceIdsShared**](VoiceIdsShared.md) | | [optional] |
| **outputAudioFormat** | [**inline**](#OutputAudioFormat) | The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. | [optional] |
| **tools** | [**kotlin.collections.List<RealtimeResponseCreateParamsToolsInner>**](RealtimeResponseCreateParamsToolsInner.md) | Tools (functions) available to the model. | [optional] |
| **toolChoice** | **kotlin.String** | How the model chooses tools. Options are `auto`, `none`, `required`, or specify a function, like `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}`. | [optional] |
@@ -24,13 +24,6 @@
| modalities | text, audio |
-
-## Enum: voice
-| Name | Value |
-| ---- | ----- |
-| voice | alloy, ash, ballad, coral, echo, sage, shimmer, verse |
-
-
## Enum: output_audio_format
| Name | Value |
diff --git a/lib/docs/RealtimeServerEvent.md b/lib/docs/RealtimeServerEvent.md
new file mode 100644
index 00000000..ebe8fcb2
--- /dev/null
+++ b/lib/docs/RealtimeServerEvent.md
@@ -0,0 +1,38 @@
+
+# RealtimeServerEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **eventId** | **kotlin.String** | The unique ID of the server event. | |
+| **type** | [**inline**](#Type) | The event type, must be `conversation.created`. | |
+| **conversation** | [**RealtimeServerEventConversationCreatedConversation**](RealtimeServerEventConversationCreatedConversation.md) | | |
+| **previousItemId** | **kotlin.String** | The ID of the preceding item after which the new item will be inserted. | |
+| **item** | [**RealtimeConversationItem**](RealtimeConversationItem.md) | | |
+| **itemId** | **kotlin.String** | The ID of the item. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part in the item's content array. | |
+| **transcript** | **kotlin.String** | The final transcript of the audio. | |
+| **delta** | **kotlin.String** | The text delta. | |
+| **error** | [**RealtimeServerEventErrorError**](RealtimeServerEventErrorError.md) | | |
+| **audioEndMs** | **kotlin.Int** | Milliseconds since the session started when speech stopped. This will correspond to the end of audio sent to the model, and thus includes the `min_silence_duration_ms` configured in the Session. | |
+| **audioStartMs** | **kotlin.Int** | Milliseconds from the start of all audio written to the buffer during the session when speech was first detected. This will correspond to the beginning of audio sent to the model, and thus includes the `prefix_padding_ms` configured in the Session. | |
+| **rateLimits** | [**kotlin.collections.List<RealtimeServerEventRateLimitsUpdatedRateLimitsInner>**](RealtimeServerEventRateLimitsUpdatedRateLimitsInner.md) | List of rate limit information. | |
+| **responseId** | **kotlin.String** | The ID of the response. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item in the response. | |
+| **part** | [**RealtimeServerEventResponseContentPartDonePart**](RealtimeServerEventResponseContentPartDonePart.md) | | |
+| **response** | [**RealtimeResponse**](RealtimeResponse.md) | | |
+| **callId** | **kotlin.String** | The ID of the function call. | |
+| **arguments** | **kotlin.String** | The final arguments as a JSON string. | |
+| **text** | **kotlin.String** | The final text content. | |
+| **session** | [**RealtimeTranscriptionSessionCreateResponse**](RealtimeTranscriptionSessionCreateResponse.md) | | |
+| **logprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | The log probabilities of the transcription. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | conversation.created, conversation.item.created, conversation.item.deleted, conversation.item.input_audio_transcription.completed, conversation.item.input_audio_transcription.delta, conversation.item.input_audio_transcription.failed, conversation.item.retrieved, conversation.item.truncated, error, input_audio_buffer.cleared, input_audio_buffer.committed, input_audio_buffer.speech_started, input_audio_buffer.speech_stopped, rate_limits.updated, response.audio.delta, response.audio.done, response.audio_transcript.delta, response.audio_transcript.done, response.content_part.added, response.content_part.done, response.created, response.done, response.function_call_arguments.delta, response.function_call_arguments.done, response.output_item.added, response.output_item.done, response.text.delta, response.text.done, session.created, session.updated, transcription_session.updated |
+
+
+
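The generator flattens every server event into the single `RealtimeServerEvent` model above, so the `type` field is the only reliable discriminator. A minimal routing sketch, assuming a hand-rolled Moshi setup (the project's Moshi dependency) rather than whatever serializer the generated client wires up; the field names come from the property table above:

```kotlin
import com.squareup.moshi.Moshi
import com.squareup.moshi.Types

// Hand-rolled Moshi adapter for peeking at raw events; "type", "delta", "transcript"
// and "error" are field names from the RealtimeServerEvent table above.
private val moshi = Moshi.Builder().build()
private val mapAdapter = moshi.adapter<Map<String, Any?>>(
    Types.newParameterizedType(Map::class.java, String::class.java, Any::class.java)
)

fun routeServerEvent(json: String) {
    val event = mapAdapter.fromJson(json) ?: return
    when (event["type"]) {
        "response.audio_transcript.delta" -> print(event["delta"])        // streaming transcript
        "conversation.item.input_audio_transcription.completed" ->
            println(event["transcript"])                                   // final user transcript
        "error" -> System.err.println(event["error"])                      // error payload
        else -> Unit                                                       // ignore everything else
    }
}
```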
diff --git a/lib/docs/RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.md b/lib/docs/RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.md
index c5240b0d..3f7d9e78 100644
--- a/lib/docs/RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.md
+++ b/lib/docs/RealtimeServerEventConversationItemInputAudioTranscriptionCompleted.md
@@ -9,6 +9,7 @@
| **itemId** | **kotlin.String** | The ID of the user message item containing the audio. | |
| **contentIndex** | **kotlin.Int** | The index of the content part containing the audio. | |
| **transcript** | **kotlin.String** | The transcribed text. | |
+| **logprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | The log probabilities of the transcription. | [optional] |
diff --git a/lib/docs/RealtimeServerEventConversationItemInputAudioTranscriptionDelta.md b/lib/docs/RealtimeServerEventConversationItemInputAudioTranscriptionDelta.md
new file mode 100644
index 00000000..78687b7c
--- /dev/null
+++ b/lib/docs/RealtimeServerEventConversationItemInputAudioTranscriptionDelta.md
@@ -0,0 +1,22 @@
+
+# RealtimeServerEventConversationItemInputAudioTranscriptionDelta
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **eventId** | **kotlin.String** | The unique ID of the server event. | |
+| **type** | [**inline**](#Type) | The event type, must be `conversation.item.input_audio_transcription.delta`. | |
+| **itemId** | **kotlin.String** | The ID of the item. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part in the item's content array. | [optional] |
+| **delta** | **kotlin.String** | The text delta. | [optional] |
+| **logprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | The log probabilities of the transcription. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | conversation.item.input_audio_transcription.delta |
+
+
+
diff --git a/lib/docs/RealtimeServerEventConversationItemRetrieved.md b/lib/docs/RealtimeServerEventConversationItemRetrieved.md
new file mode 100644
index 00000000..f5b5c95d
--- /dev/null
+++ b/lib/docs/RealtimeServerEventConversationItemRetrieved.md
@@ -0,0 +1,19 @@
+
+# RealtimeServerEventConversationItemRetrieved
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **eventId** | **kotlin.String** | The unique ID of the server event. | |
+| **type** | [**inline**](#Type) | The event type, must be `conversation.item.retrieved`. | |
+| **item** | [**RealtimeConversationItem**](RealtimeConversationItem.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | conversation.item.retrieved |
+
+
+
diff --git a/lib/docs/RealtimeServerEventTranscriptionSessionUpdated.md b/lib/docs/RealtimeServerEventTranscriptionSessionUpdated.md
new file mode 100644
index 00000000..4d6ace35
--- /dev/null
+++ b/lib/docs/RealtimeServerEventTranscriptionSessionUpdated.md
@@ -0,0 +1,19 @@
+
+# RealtimeServerEventTranscriptionSessionUpdated
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **eventId** | **kotlin.String** | The unique ID of the server event. | |
+| **type** | [**inline**](#Type) | The event type, must be `transcription_session.updated`. | |
+| **session** | [**RealtimeTranscriptionSessionCreateResponse**](RealtimeTranscriptionSessionCreateResponse.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | transcription_session.updated |
+
+
+
diff --git a/lib/docs/RealtimeSession.md b/lib/docs/RealtimeSession.md
index e11ff632..65258b27 100644
--- a/lib/docs/RealtimeSession.md
+++ b/lib/docs/RealtimeSession.md
@@ -4,18 +4,19 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **id** | **kotlin.String** | Unique identifier for the session object. | [optional] |
+| **id** | **kotlin.String** | Unique identifier for the session that looks like `sess_1234567890abcdef`. | [optional] |
| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | The set of modalities the model can respond with. To disable audio, set this to [\"text\"]. | [optional] |
-| **model** | [**RealtimeSessionModel**](RealtimeSessionModel.md) | | [optional] |
-| **instructions** | **kotlin.String** | The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model can be instructed on response content and format, (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The instructions are not guaranteed to be followed by the model, but they provide guidance to the model on the desired behavior. Note that the server sets default instructions which will be used if this field is not set and are visible in the `session.created` event at the start of the session. | [optional] |
-| **voice** | [**inline**](#Voice) | The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. | [optional] |
+| **model** | [**inline**](#Model) | The Realtime model used for this session. | [optional] |
+| **instructions** | **kotlin.String** | The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model can be instructed on response content and format, (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The instructions are not guaranteed to be followed by the model, but they provide guidance to the model on the desired behavior. Note that the server sets default instructions which will be used if this field is not set and are visible in the `session.created` event at the start of the session. | [optional] |
+| **voice** | [**VoiceIdsShared**](VoiceIdsShared.md) | | [optional] |
| **inputAudioFormat** | [**inline**](#InputAudioFormat) | The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian byte order. | [optional] |
| **outputAudioFormat** | [**inline**](#OutputAudioFormat) | The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is sampled at a rate of 24kHz. | [optional] |
| **inputAudioTranscription** | [**RealtimeSessionInputAudioTranscription**](RealtimeSessionInputAudioTranscription.md) | | [optional] |
| **turnDetection** | [**RealtimeSessionTurnDetection**](RealtimeSessionTurnDetection.md) | | [optional] |
+| **inputAudioNoiseReduction** | [**RealtimeSessionInputAudioNoiseReduction**](RealtimeSessionInputAudioNoiseReduction.md) | | [optional] |
| **tools** | [**kotlin.collections.List<RealtimeResponseCreateParamsToolsInner>**](RealtimeResponseCreateParamsToolsInner.md) | Tools (functions) available to the model. | [optional] |
| **toolChoice** | **kotlin.String** | How the model chooses tools. Options are `auto`, `none`, `required`, or specify a function. | [optional] |
-| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. | [optional] |
+| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a temperature of 0.8 is highly recommended for best performance. | [optional] |
| **maxResponseOutputTokens** | [**RealtimeResponseCreateParamsMaxResponseOutputTokens**](RealtimeResponseCreateParamsMaxResponseOutputTokens.md) | | [optional] |
@@ -26,11 +27,11 @@
| modalities | text, audio |
-
-## Enum: voice
+
+## Enum: model
| Name | Value |
| ---- | ----- |
-| voice | alloy, ash, ballad, coral, echo, sage, shimmer, verse |
+| model | gpt-4o-realtime-preview, gpt-4o-realtime-preview-2024-10-01, gpt-4o-realtime-preview-2024-12-17, gpt-4o-mini-realtime-preview, gpt-4o-mini-realtime-preview-2024-12-17 |
diff --git a/lib/docs/RealtimeSessionCreateRequest.md b/lib/docs/RealtimeSessionCreateRequest.md
index 18f21612..e65a33c2 100644
--- a/lib/docs/RealtimeSessionCreateRequest.md
+++ b/lib/docs/RealtimeSessionCreateRequest.md
@@ -6,15 +6,16 @@
| ------------ | ------------- | ------------- | ------------- |
| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | The set of modalities the model can respond with. To disable audio, set this to [\"text\"]. | [optional] |
| **model** | [**inline**](#Model) | The Realtime model used for this session. | [optional] |
-| **instructions** | **kotlin.String** | The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model can be instructed on response content and format, (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The instructions are not guaranteed to be followed by the model, but they provide guidance to the model on the desired behavior. Note that the server sets default instructions which will be used if this field is not set and are visible in the `session.created` event at the start of the session. | [optional] |
-| **voice** | [**inline**](#Voice) | The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. | [optional] |
+| **instructions** | **kotlin.String** | The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model can be instructed on response content and format, (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The instructions are not guaranteed to be followed by the model, but they provide guidance to the model on the desired behavior. Note that the server sets default instructions which will be used if this field is not set and are visible in the `session.created` event at the start of the session. | [optional] |
+| **voice** | [**VoiceIdsShared**](VoiceIdsShared.md) | | [optional] |
| **inputAudioFormat** | [**inline**](#InputAudioFormat) | The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian byte order. | [optional] |
| **outputAudioFormat** | [**inline**](#OutputAudioFormat) | The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is sampled at a rate of 24kHz. | [optional] |
-| **inputAudioTranscription** | [**RealtimeSessionCreateRequestInputAudioTranscription**](RealtimeSessionCreateRequestInputAudioTranscription.md) | | [optional] |
-| **turnDetection** | [**RealtimeSessionCreateRequestTurnDetection**](RealtimeSessionCreateRequestTurnDetection.md) | | [optional] |
+| **inputAudioTranscription** | [**RealtimeSessionInputAudioTranscription**](RealtimeSessionInputAudioTranscription.md) | | [optional] |
+| **turnDetection** | [**RealtimeSessionTurnDetection**](RealtimeSessionTurnDetection.md) | | [optional] |
+| **inputAudioNoiseReduction** | [**RealtimeSessionInputAudioNoiseReduction**](RealtimeSessionInputAudioNoiseReduction.md) | | [optional] |
| **tools** | [**kotlin.collections.List<RealtimeResponseCreateParamsToolsInner>**](RealtimeResponseCreateParamsToolsInner.md) | Tools (functions) available to the model. | [optional] |
| **toolChoice** | **kotlin.String** | How the model chooses tools. Options are `auto`, `none`, `required`, or specify a function. | [optional] |
-| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. | [optional] |
+| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a temperature of 0.8 is highly recommended for best performance. | [optional] |
| **maxResponseOutputTokens** | [**RealtimeResponseCreateParamsMaxResponseOutputTokens**](RealtimeResponseCreateParamsMaxResponseOutputTokens.md) | | [optional] |
@@ -32,13 +33,6 @@
| model | gpt-4o-realtime-preview, gpt-4o-realtime-preview-2024-10-01, gpt-4o-realtime-preview-2024-12-17, gpt-4o-mini-realtime-preview, gpt-4o-mini-realtime-preview-2024-12-17 |
-
-## Enum: voice
-| Name | Value |
-| ---- | ----- |
-| voice | alloy, ash, ballad, coral, echo, sage, shimmer, verse |
-
-
## Enum: input_audio_format
| Name | Value |
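Taken together, the `RealtimeSessionCreateRequest` table above maps onto a fairly small typed builder. A sketch under two assumptions: the generator emits these models as data classes with nullable, defaulted constructor parameters (its usual output), and they live in the default `org.openapitools.client.models` package. Enum-typed fields (voice, model, audio formats, noise reduction) are left at their server defaults to avoid guessing how the generator spells those enum constants.

```kotlin
import java.math.BigDecimal
import org.openapitools.client.models.RealtimeSessionCreateRequest             // assumed package
import org.openapitools.client.models.RealtimeSessionInputAudioTranscription
import org.openapitools.client.models.RealtimeSessionTurnDetection

// Sketch only: property names are taken from the table above, constructor shape is assumed.
val sessionRequest = RealtimeSessionCreateRequest(
    instructions = "Answer briefly and speak quickly.",
    temperature = BigDecimal("0.8"),                  // the value recommended for audio models
    inputAudioTranscription = RealtimeSessionInputAudioTranscription(
        model = "gpt-4o-transcribe",
        language = "en",
    ),
    turnDetection = RealtimeSessionTurnDetection(
        createResponse = true,                         // auto-respond when VAD detects end of speech
        interruptResponse = true,                      // allow barge-in to cut off an ongoing response
    ),
)
```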
diff --git a/lib/docs/RealtimeSessionCreateRequestTurnDetection.md b/lib/docs/RealtimeSessionCreateRequestTurnDetection.md
deleted file mode 100644
index bce7e248..00000000
--- a/lib/docs/RealtimeSessionCreateRequestTurnDetection.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-# RealtimeSessionCreateRequestTurnDetection
-
-## Properties
-| Name | Type | Description | Notes |
-| ------------ | ------------- | ------------- | ------------- |
-| **type** | **kotlin.String** | Type of turn detection, only `server_vad` is currently supported. | [optional] |
-| **threshold** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher threshold will require louder audio to activate the model, and thus might perform better in noisy environments. | [optional] |
-| **prefixPaddingMs** | **kotlin.Int** | Amount of audio to include before the VAD detected speech (in milliseconds). Defaults to 300ms. | [optional] |
-| **silenceDurationMs** | **kotlin.Int** | Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. With shorter values the model will respond more quickly, but may jump in on short pauses from the user. | [optional] |
-| **createResponse** | **kotlin.Boolean** | Whether or not to automatically generate a response when VAD is enabled. `true` by default. | [optional] |
-
-
-
diff --git a/lib/docs/RealtimeSessionCreateResponse.md b/lib/docs/RealtimeSessionCreateResponse.md
index 68c0fe1d..968931d3 100644
--- a/lib/docs/RealtimeSessionCreateResponse.md
+++ b/lib/docs/RealtimeSessionCreateResponse.md
@@ -7,10 +7,10 @@
| **clientSecret** | [**RealtimeSessionCreateResponseClientSecret**](RealtimeSessionCreateResponseClientSecret.md) | | |
| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | The set of modalities the model can respond with. To disable audio, set this to [\"text\"]. | [optional] |
| **instructions** | **kotlin.String** | The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model can be instructed on response content and format, (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The instructions are not guaranteed to be followed by the model, but they provide guidance to the model on the desired behavior. Note that the server sets default instructions which will be used if this field is not set and are visible in the `session.created` event at the start of the session. | [optional] |
-| **voice** | [**inline**](#Voice) | The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. | [optional] |
+| **voice** | [**VoiceIdsShared**](VoiceIdsShared.md) | | [optional] |
| **inputAudioFormat** | **kotlin.String** | The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. | [optional] |
| **outputAudioFormat** | **kotlin.String** | The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. | [optional] |
-| **inputAudioTranscription** | [**RealtimeSessionInputAudioTranscription**](RealtimeSessionInputAudioTranscription.md) | | [optional] |
+| **inputAudioTranscription** | [**RealtimeSessionCreateResponseInputAudioTranscription**](RealtimeSessionCreateResponseInputAudioTranscription.md) | | [optional] |
| **turnDetection** | [**RealtimeSessionCreateResponseTurnDetection**](RealtimeSessionCreateResponseTurnDetection.md) | | [optional] |
| **tools** | [**kotlin.collections.List<RealtimeResponseCreateParamsToolsInner>**](RealtimeResponseCreateParamsToolsInner.md) | Tools (functions) available to the model. | [optional] |
| **toolChoice** | **kotlin.String** | How the model chooses tools. Options are `auto`, `none`, `required`, or specify a function. | [optional] |
@@ -25,11 +25,4 @@
| modalities | text, audio |
-
-## Enum: voice
-| Name | Value |
-| ---- | ----- |
-| voice | alloy, ash, ballad, coral, echo, sage, shimmer, verse |
-
-
diff --git a/lib/docs/RealtimeSessionCreateResponseInputAudioTranscription.md b/lib/docs/RealtimeSessionCreateResponseInputAudioTranscription.md
new file mode 100644
index 00000000..1fd7d5d6
--- /dev/null
+++ b/lib/docs/RealtimeSessionCreateResponseInputAudioTranscription.md
@@ -0,0 +1,10 @@
+
+# RealtimeSessionCreateResponseInputAudioTranscription
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **model** | **kotlin.String** | The model to use for transcription, `whisper-1` is the only currently supported model. | [optional] |
+
+
+
diff --git a/lib/docs/RealtimeSessionInputAudioNoiseReduction.md b/lib/docs/RealtimeSessionInputAudioNoiseReduction.md
new file mode 100644
index 00000000..d37263ea
--- /dev/null
+++ b/lib/docs/RealtimeSessionInputAudioNoiseReduction.md
@@ -0,0 +1,17 @@
+
+# RealtimeSessionInputAudioNoiseReduction
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Type of noise reduction. `near_field` is for close-talking microphones such as headphones, `far_field` is for far-field microphones such as laptop or conference room microphones. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | near_field, far_field |
+
+
+
diff --git a/lib/docs/RealtimeSessionInputAudioTranscription.md b/lib/docs/RealtimeSessionInputAudioTranscription.md
index a80c0449..e4e4b5d9 100644
--- a/lib/docs/RealtimeSessionInputAudioTranscription.md
+++ b/lib/docs/RealtimeSessionInputAudioTranscription.md
@@ -4,7 +4,9 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **model** | **kotlin.String** | The model to use for transcription, `whisper-1` is the only currently supported model. | [optional] |
+| **model** | **kotlin.String** | The model to use for transcription, current options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1`. | [optional] |
+| **language** | **kotlin.String** | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) format will improve accuracy and latency. | [optional] |
+| **prompt** | **kotlin.String** | An optional text to guide the model's style or continue a previous audio segment. For `whisper-1`, the [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For `gpt-4o-transcribe` models, the prompt is a free text string, for example \"expect words related to technology\". | [optional] |
diff --git a/lib/docs/RealtimeSessionTurnDetection.md b/lib/docs/RealtimeSessionTurnDetection.md
index 38e34c6c..38a0c27f 100644
--- a/lib/docs/RealtimeSessionTurnDetection.md
+++ b/lib/docs/RealtimeSessionTurnDetection.md
@@ -4,17 +4,27 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **type** | [**inline**](#Type) | Type of turn detection, only `server_vad` is currently supported. | [optional] |
-| **threshold** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher threshold will require louder audio to activate the model, and thus might perform better in noisy environments. | [optional] |
-| **prefixPaddingMs** | **kotlin.Int** | Amount of audio to include before the VAD detected speech (in milliseconds). Defaults to 300ms. | [optional] |
-| **silenceDurationMs** | **kotlin.Int** | Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. With shorter values the model will respond more quickly, but may jump in on short pauses from the user. | [optional] |
+| **type** | [**inline**](#Type) | Type of turn detection. | [optional] |
+| **eagerness** | [**inline**](#Eagerness) | Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` will wait longer for the user to continue speaking, `high` will respond more quickly. `auto` is the default and is equivalent to `medium`. | [optional] |
+| **threshold** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher threshold will require louder audio to activate the model, and thus might perform better in noisy environments. | [optional] |
+| **prefixPaddingMs** | **kotlin.Int** | Used only for `server_vad` mode. Amount of audio to include before the VAD detected speech (in milliseconds). Defaults to 300ms. | [optional] |
+| **silenceDurationMs** | **kotlin.Int** | Used only for `server_vad` mode. Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. With shorter values the model will respond more quickly, but may jump in on short pauses from the user. | [optional] |
+| **createResponse** | **kotlin.Boolean** | Whether or not to automatically generate a response when a VAD stop event occurs. | [optional] |
+| **interruptResponse** | **kotlin.Boolean** | Whether or not to automatically interrupt any ongoing response with output to the default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs. | [optional] |
## Enum: type
| Name | Value |
| ---- | ----- |
-| type | server_vad |
+| type | server_vad, semantic_vad |
+
+
+
+## Enum: eagerness
+| Name | Value |
+| ---- | ----- |
+| eagerness | low, medium, high, auto |
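The table above now documents two turn-detection modes with disjoint knobs (`threshold`, padding and silence for `server_vad`; `eagerness` for `semantic_vad`). A small sketch of choosing between them, built as a plain payload map using the snake_case wire names of the camelCase properties listed above, so nothing about the generated enum spellings has to be assumed:

```kotlin
// Hypothetical helper: pick a turn-detection config for the session based on the environment.
fun turnDetectionFor(noisyEnvironment: Boolean): Map<String, Any> =
    if (noisyEnvironment) mapOf(
        "type" to "server_vad",
        "threshold" to 0.7,             // above the 0.5 default: needs louder audio to trigger
        "prefix_padding_ms" to 300,
        "silence_duration_ms" to 700,   // wait a little longer before treating silence as end of turn
        "create_response" to true,
    ) else mapOf(
        "type" to "semantic_vad",
        "eagerness" to "low",           // wait longer for the user to finish a thought
        "interrupt_response" to true,
    )
```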
diff --git a/lib/docs/RealtimeTranscriptionSessionCreateRequest.md b/lib/docs/RealtimeTranscriptionSessionCreateRequest.md
new file mode 100644
index 00000000..4ee3436a
--- /dev/null
+++ b/lib/docs/RealtimeTranscriptionSessionCreateRequest.md
@@ -0,0 +1,29 @@
+
+# RealtimeTranscriptionSessionCreateRequest
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | The set of modalities the model can respond with. To disable audio, set this to [\"text\"]. | [optional] |
+| **inputAudioFormat** | [**inline**](#InputAudioFormat) | The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian byte order. | [optional] |
+| **inputAudioTranscription** | [**RealtimeTranscriptionSessionCreateRequestInputAudioTranscription**](RealtimeTranscriptionSessionCreateRequestInputAudioTranscription.md) | | [optional] |
+| **turnDetection** | [**RealtimeTranscriptionSessionCreateRequestTurnDetection**](RealtimeTranscriptionSessionCreateRequestTurnDetection.md) | | [optional] |
+| **inputAudioNoiseReduction** | [**RealtimeSessionInputAudioNoiseReduction**](RealtimeSessionInputAudioNoiseReduction.md) | | [optional] |
+| **include** | **kotlin.collections.List<kotlin.String>** | The set of items to include in the transcription. Current available items are: - `item.input_audio_transcription.logprobs` | [optional] |
+
+
+
+## Enum: modalities
+| Name | Value |
+| ---- | ----- |
+| modalities | text, audio |
+
+
+
+## Enum: input_audio_format
+| Name | Value |
+| ---- | ----- |
+| inputAudioFormat | pcm16, g711_ulaw, g711_alaw |
+
+
+
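For the new transcription-only sessions documented above, the request body is the same handful of audio fields minus the response-generation options. A sketch of one such body as a plain map (snake_case wire names assumed for the camelCase properties; the `include` entry is the one item the table lists as currently available):

```kotlin
// Hypothetical transcription-session payload; field names follow the table above.
val transcriptionSessionBody = mapOf(
    "input_audio_format" to "pcm16",
    "input_audio_transcription" to mapOf(
        "model" to "gpt-4o-mini-transcribe",
        "language" to "en",
        "prompt" to "expect words related to technology",
    ),
    "input_audio_noise_reduction" to mapOf("type" to "near_field"),   // headset / close-talking mic
    "turn_detection" to mapOf("type" to "server_vad"),
    "include" to listOf("item.input_audio_transcription.logprobs"),   // surface logprobs on transcripts
)
```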
diff --git a/lib/docs/RealtimeTranscriptionSessionCreateRequestInputAudioTranscription.md b/lib/docs/RealtimeTranscriptionSessionCreateRequestInputAudioTranscription.md
new file mode 100644
index 00000000..f9762c3c
--- /dev/null
+++ b/lib/docs/RealtimeTranscriptionSessionCreateRequestInputAudioTranscription.md
@@ -0,0 +1,19 @@
+
+# RealtimeTranscriptionSessionCreateRequestInputAudioTranscription
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **model** | [**inline**](#Model) | The model to use for transcription, current options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1`. | [optional] |
+| **language** | **kotlin.String** | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) format will improve accuracy and latency. | [optional] |
+| **prompt** | **kotlin.String** | An optional text to guide the model's style or continue a previous audio segment. For `whisper-1`, the [prompt is a list of keywords](/docs/guides/speech-to-text#prompting). For `gpt-4o-transcribe` models, the prompt is a free text string, for example \"expect words related to technology\". | [optional] |
+
+
+
+## Enum: model
+| Name | Value |
+| ---- | ----- |
+| model | gpt-4o-transcribe, gpt-4o-mini-transcribe, whisper-1 |
+
+
+
diff --git a/lib/docs/RealtimeTranscriptionSessionCreateRequestTurnDetection.md b/lib/docs/RealtimeTranscriptionSessionCreateRequestTurnDetection.md
new file mode 100644
index 00000000..a4ccfdf5
--- /dev/null
+++ b/lib/docs/RealtimeTranscriptionSessionCreateRequestTurnDetection.md
@@ -0,0 +1,30 @@
+
+# RealtimeTranscriptionSessionCreateRequestTurnDetection
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Type of turn detection. | [optional] |
+| **eagerness** | [**inline**](#Eagerness) | Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` will wait longer for the user to continue speaking, `high` will respond more quickly. `auto` is the default and is equivalent to `medium`. | [optional] |
+| **threshold** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher threshold will require louder audio to activate the model, and thus might perform better in noisy environments. | [optional] |
+| **prefixPaddingMs** | **kotlin.Int** | Used only for `server_vad` mode. Amount of audio to include before the VAD detected speech (in milliseconds). Defaults to 300ms. | [optional] |
+| **silenceDurationMs** | **kotlin.Int** | Used only for `server_vad` mode. Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. With shorter values the model will respond more quickly, but may jump in on short pauses from the user. | [optional] |
+| **createResponse** | **kotlin.Boolean** | Whether or not to automatically generate a response when a VAD stop event occurs. Not available for transcription sessions. | [optional] |
+| **interruptResponse** | **kotlin.Boolean** | Whether or not to automatically interrupt any ongoing response with output to the default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs. Not available for transcription sessions. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | server_vad, semantic_vad |
+
+
+
+## Enum: eagerness
+| Name | Value |
+| ---- | ----- |
+| eagerness | low, medium, high, auto |
+
+
+
diff --git a/lib/docs/RealtimeTranscriptionSessionCreateResponse.md b/lib/docs/RealtimeTranscriptionSessionCreateResponse.md
new file mode 100644
index 00000000..6e4e840f
--- /dev/null
+++ b/lib/docs/RealtimeTranscriptionSessionCreateResponse.md
@@ -0,0 +1,21 @@
+
+# RealtimeTranscriptionSessionCreateResponse
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **clientSecret** | [**RealtimeTranscriptionSessionCreateResponseClientSecret**](RealtimeTranscriptionSessionCreateResponseClientSecret.md) | | |
+| **modalities** | [**inline**](#kotlin.collections.List<Modalities>) | The set of modalities the model can respond with. To disable audio, set this to [\"text\"]. | [optional] |
+| **inputAudioFormat** | **kotlin.String** | The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. | [optional] |
+| **inputAudioTranscription** | [**RealtimeTranscriptionSessionCreateResponseInputAudioTranscription**](RealtimeTranscriptionSessionCreateResponseInputAudioTranscription.md) | | [optional] |
+| **turnDetection** | [**RealtimeSessionCreateResponseTurnDetection**](RealtimeSessionCreateResponseTurnDetection.md) | | [optional] |
+
+
+
+## Enum: modalities
+| Name | Value |
+| ---- | ----- |
+| modalities | text, audio |
+
+
+
diff --git a/lib/docs/RealtimeTranscriptionSessionCreateResponseClientSecret.md b/lib/docs/RealtimeTranscriptionSessionCreateResponseClientSecret.md
new file mode 100644
index 00000000..e45b89ea
--- /dev/null
+++ b/lib/docs/RealtimeTranscriptionSessionCreateResponseClientSecret.md
@@ -0,0 +1,11 @@
+
+# RealtimeTranscriptionSessionCreateResponseClientSecret
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **`value`** | **kotlin.String** | Ephemeral key usable in client environments to authenticate connections to the Realtime API. Use this in client-side environments rather than a standard API token, which should only be used server-side. | |
+| **expiresAt** | **kotlin.Int** | Timestamp for when the token expires. Currently, all tokens expire after one minute. | |
+
+
+
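Since the ephemeral `clientSecret` above expires roughly a minute after it is minted, client code should check `expiresAt` before handing the value to a browser or device. A tiny sketch, assuming the generated accessors keep the documented camelCase names and that `expiresAt` is a Unix timestamp in seconds:

```kotlin
import org.openapitools.client.models.RealtimeTranscriptionSessionCreateResponseClientSecret  // assumed package

// Returns true while the ephemeral key should still be usable client-side.
fun isStillValid(secret: RealtimeTranscriptionSessionCreateResponseClientSecret): Boolean =
    secret.expiresAt.toLong() > System.currentTimeMillis() / 1000
```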
diff --git a/lib/docs/RealtimeSessionCreateRequestInputAudioTranscription.md b/lib/docs/RealtimeTranscriptionSessionCreateResponseInputAudioTranscription.md
similarity index 60%
rename from lib/docs/RealtimeSessionCreateRequestInputAudioTranscription.md
rename to lib/docs/RealtimeTranscriptionSessionCreateResponseInputAudioTranscription.md
index 8b534bd5..b0da473d 100644
--- a/lib/docs/RealtimeSessionCreateRequestInputAudioTranscription.md
+++ b/lib/docs/RealtimeTranscriptionSessionCreateResponseInputAudioTranscription.md
@@ -1,12 +1,19 @@
-# RealtimeSessionCreateRequestInputAudioTranscription
+# RealtimeTranscriptionSessionCreateResponseInputAudioTranscription
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **model** | **kotlin.String** | The model to use for transcription, `whisper-1` is the only currently supported model. | [optional] |
+| **model** | [**inline**](#Model) | The model to use for transcription. Can be `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, or `whisper-1`. | [optional] |
| **language** | **kotlin.String** | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) format will improve accuracy and latency. | [optional] |
| **prompt** | **kotlin.String** | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text#prompting) should match the audio language. | [optional] |
+
+## Enum: model
+| Name | Value |
+| ---- | ----- |
+| model | gpt-4o-transcribe, gpt-4o-mini-transcribe, whisper-1 |
+
+
diff --git a/lib/docs/Reasoning.md b/lib/docs/Reasoning.md
new file mode 100644
index 00000000..ccc0975c
--- /dev/null
+++ b/lib/docs/Reasoning.md
@@ -0,0 +1,18 @@
+
+# Reasoning
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **effort** | [**ReasoningEffort**](ReasoningEffort.md) | | [optional] |
+| **generateSummary** | [**inline**](#GenerateSummary) | **computer_use_preview only** A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of `concise` or `detailed`. | [optional] |
+
+
+
+## Enum: generate_summary
+| Name | Value |
+| ---- | ----- |
+| generateSummary | concise, detailed |
+
+
+
diff --git a/lib/docs/ReasoningItem.md b/lib/docs/ReasoningItem.md
new file mode 100644
index 00000000..8dcbc3df
--- /dev/null
+++ b/lib/docs/ReasoningItem.md
@@ -0,0 +1,27 @@
+
+# ReasoningItem
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the object. Always `reasoning`. | |
+| **id** | **kotlin.String** | The unique identifier of the reasoning content. | |
+| **summary** | [**kotlin.collections.List<ReasoningItemSummaryInner>**](ReasoningItemSummaryInner.md) | Reasoning text contents. | |
+| **status** | [**inline**](#Status) | The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | reasoning |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, completed, incomplete |
+
+
+
diff --git a/lib/docs/ReasoningItemSummaryInner.md b/lib/docs/ReasoningItemSummaryInner.md
new file mode 100644
index 00000000..2685aa99
--- /dev/null
+++ b/lib/docs/ReasoningItemSummaryInner.md
@@ -0,0 +1,18 @@
+
+# ReasoningItemSummaryInner
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the object. Always `summary_text`. | |
+| **text** | **kotlin.String** | A short summary of the reasoning used by the model when generating the response. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | summary_text |
+
+
+
diff --git a/lib/docs/Refusal.md b/lib/docs/Refusal.md
new file mode 100644
index 00000000..c0981812
--- /dev/null
+++ b/lib/docs/Refusal.md
@@ -0,0 +1,18 @@
+
+# Refusal
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the refusal. Always `refusal`. | |
+| **refusal** | **kotlin.String** | The refusal explanation from the model. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | refusal |
+
+
+
diff --git a/lib/docs/Response.md b/lib/docs/Response.md
new file mode 100644
index 00000000..935b7104
--- /dev/null
+++ b/lib/docs/Response.md
@@ -0,0 +1,53 @@
+
+# Response
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | |
+| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | |
+| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | |
+| **model** | [**ModelIdsResponses**](ModelIdsResponses.md) | | |
+| **instructions** | **kotlin.String** | Inserts a system (or developer) message as the first item in the model's context. When used along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to swap out system (or developer) messages in new responses. | |
+| **tools** | [**kotlin.collections.List<Tool>**](Tool.md) | An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. The two categories of tools you can provide the model are: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like [web search](/docs/guides/tools-web-search) or [file search](/docs/guides/tools-file-search). Learn more about [built-in tools](/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling the model to call your own code. Learn more about [function calling](/docs/guides/function-calling). | |
+| **toolChoice** | [**ResponsePropertiesToolChoice**](ResponsePropertiesToolChoice.md) | | |
+| **id** | **kotlin.String** | Unique identifier for this Response. | |
+| **`object`** | [**inline**](#`Object`) | The object type of this resource - always set to `response`. | |
+| **createdAt** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | Unix timestamp (in seconds) of when this Response was created. | |
+| **error** | [**ResponseError**](ResponseError.md) | | |
+| **incompleteDetails** | [**ResponseAllOfIncompleteDetails**](ResponseAllOfIncompleteDetails.md) | | |
+| **output** | [**kotlin.collections.List<OutputItem>**](OutputItem.md) | An array of content items generated by the model. - The length and order of items in the `output` array is dependent on the model's response. - Rather than accessing the first item in the `output` array and assuming it's an `assistant` message with the content generated by the model, you might consider using the `output_text` property where supported in SDKs. | |
+| **parallelToolCalls** | **kotlin.Boolean** | Whether to allow the model to run tool calls in parallel. | |
+| **user** | **kotlin.String** | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] |
+| **previousResponseId** | **kotlin.String** | The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](/docs/guides/conversation-state). | [optional] |
+| **reasoning** | [**Reasoning**](Reasoning.md) | | [optional] |
+| **maxOutputTokens** | **kotlin.Int** | An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). | [optional] |
+| **text** | [**ResponsePropertiesText**](ResponsePropertiesText.md) | | [optional] |
+| **truncation** | [**inline**](#Truncation) | The truncation strategy to use for the model response. - `auto`: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation. - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. | [optional] |
+| **status** | [**inline**](#Status) | The status of the response generation. One of `completed`, `failed`, `in_progress`, or `incomplete`. | [optional] |
+| **outputText** | **kotlin.String** | SDK-only convenience property that contains the aggregated text output from all `output_text` items in the `output` array, if any are present. Supported in the Python and JavaScript SDKs. | [optional] |
+| **usage** | [**ResponseUsage**](ResponseUsage.md) | | [optional] |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | response |
+
+
+
+## Enum: truncation
+| Name | Value |
+| ---- | ----- |
+| truncation | auto, disabled |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | completed, failed, in_progress, incomplete |
+
+
+
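The `Response` model above is mostly bookkeeping around the `output` array. A minimal inspection sketch that sticks to fields from the table and assumes the generated accessors keep the documented camelCase names (`outputText` is flagged as an SDK-only convenience, so it is treated as absent-by-default here):

```kotlin
import org.openapitools.client.models.Response   // assumed package

// Sketch: log a finished Response without assuming anything about OutputItem's shape.
fun logResponse(response: Response) {
    println("response ${response.id} (model=${response.model}) status=${response.status}")
    response.error?.let { println("error: ${it.code} - ${it.message}") }        // populated on failures
    response.incompleteDetails?.reason?.let { println("incomplete: $it") }      // max_output_tokens or content_filter
    println(response.outputText ?: "(${response.output.size} structured output item(s))")
}
```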
diff --git a/lib/docs/ResponseAllOfIncompleteDetails.md b/lib/docs/ResponseAllOfIncompleteDetails.md
new file mode 100644
index 00000000..226f581f
--- /dev/null
+++ b/lib/docs/ResponseAllOfIncompleteDetails.md
@@ -0,0 +1,17 @@
+
+# ResponseAllOfIncompleteDetails
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **reason** | [**inline**](#Reason) | The reason why the response is incomplete. | [optional] |
+
+
+
+## Enum: reason
+| Name | Value |
+| ---- | ----- |
+| reason | max_output_tokens, content_filter |
+
+
+
diff --git a/lib/docs/ResponseAudioDeltaEvent.md b/lib/docs/ResponseAudioDeltaEvent.md
new file mode 100644
index 00000000..db05abf5
--- /dev/null
+++ b/lib/docs/ResponseAudioDeltaEvent.md
@@ -0,0 +1,18 @@
+
+# ResponseAudioDeltaEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.audio.delta`. | |
+| **delta** | **kotlin.String** | A chunk of Base64 encoded response audio bytes. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.audio.delta |
+
+
+
diff --git a/lib/docs/ResponseAudioDoneEvent.md b/lib/docs/ResponseAudioDoneEvent.md
new file mode 100644
index 00000000..ea934201
--- /dev/null
+++ b/lib/docs/ResponseAudioDoneEvent.md
@@ -0,0 +1,17 @@
+
+# ResponseAudioDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.audio.done`. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.audio.done |
+
+
+
diff --git a/lib/docs/ResponseAudioTranscriptDeltaEvent.md b/lib/docs/ResponseAudioTranscriptDeltaEvent.md
new file mode 100644
index 00000000..a9286678
--- /dev/null
+++ b/lib/docs/ResponseAudioTranscriptDeltaEvent.md
@@ -0,0 +1,18 @@
+
+# ResponseAudioTranscriptDeltaEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.audio.transcript.delta`. | |
+| **delta** | **kotlin.String** | The partial transcript of the audio response. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.audio.transcript.delta |
+
+
+
diff --git a/lib/docs/ResponseAudioTranscriptDoneEvent.md b/lib/docs/ResponseAudioTranscriptDoneEvent.md
new file mode 100644
index 00000000..42ac34e9
--- /dev/null
+++ b/lib/docs/ResponseAudioTranscriptDoneEvent.md
@@ -0,0 +1,17 @@
+
+# ResponseAudioTranscriptDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.audio.transcript.done`. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.audio.transcript.done |
+
+
+
diff --git a/lib/docs/ResponseCodeInterpreterCallCodeDeltaEvent.md b/lib/docs/ResponseCodeInterpreterCallCodeDeltaEvent.md
new file mode 100644
index 00000000..59e35ea2
--- /dev/null
+++ b/lib/docs/ResponseCodeInterpreterCallCodeDeltaEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseCodeInterpreterCallCodeDeltaEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.code_interpreter_call.code.delta`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the code interpreter call is in progress. | |
+| **delta** | **kotlin.String** | The partial code snippet added by the code interpreter. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.code_interpreter_call.code.delta |
+
+
+
diff --git a/lib/docs/ResponseCodeInterpreterCallCodeDoneEvent.md b/lib/docs/ResponseCodeInterpreterCallCodeDoneEvent.md
new file mode 100644
index 00000000..c943370e
--- /dev/null
+++ b/lib/docs/ResponseCodeInterpreterCallCodeDoneEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseCodeInterpreterCallCodeDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.code_interpreter_call.code.done`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the code interpreter call is in progress. | |
+| **code** | **kotlin.String** | The final code snippet output by the code interpreter. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.code_interpreter_call.code.done |
+
+
+
diff --git a/lib/docs/ResponseCodeInterpreterCallCompletedEvent.md b/lib/docs/ResponseCodeInterpreterCallCompletedEvent.md
new file mode 100644
index 00000000..7eab1ad9
--- /dev/null
+++ b/lib/docs/ResponseCodeInterpreterCallCompletedEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseCodeInterpreterCallCompletedEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.code_interpreter_call.completed`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the code interpreter call is in progress. | |
+| **codeInterpreterCall** | [**CodeInterpreterToolCall**](CodeInterpreterToolCall.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.code_interpreter_call.completed |
+
+
+
diff --git a/lib/docs/ResponseCodeInterpreterCallInProgressEvent.md b/lib/docs/ResponseCodeInterpreterCallInProgressEvent.md
new file mode 100644
index 00000000..ce2151ec
--- /dev/null
+++ b/lib/docs/ResponseCodeInterpreterCallInProgressEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseCodeInterpreterCallInProgressEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.code_interpreter_call.in_progress`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the code interpreter call is in progress. | |
+| **codeInterpreterCall** | [**CodeInterpreterToolCall**](CodeInterpreterToolCall.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.code_interpreter_call.in_progress |
+
+
+
diff --git a/lib/docs/ResponseCodeInterpreterCallInterpretingEvent.md b/lib/docs/ResponseCodeInterpreterCallInterpretingEvent.md
new file mode 100644
index 00000000..901456d2
--- /dev/null
+++ b/lib/docs/ResponseCodeInterpreterCallInterpretingEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseCodeInterpreterCallInterpretingEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.code_interpreter_call.interpreting`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the code interpreter call is in progress. | |
+| **codeInterpreterCall** | [**CodeInterpreterToolCall**](CodeInterpreterToolCall.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.code_interpreter_call.interpreting |
+
+
+
diff --git a/lib/docs/ResponseCompletedEvent.md b/lib/docs/ResponseCompletedEvent.md
new file mode 100644
index 00000000..3fd274fe
--- /dev/null
+++ b/lib/docs/ResponseCompletedEvent.md
@@ -0,0 +1,18 @@
+
+# ResponseCompletedEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.completed`. | |
+| **response** | [**Response**](Response.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.completed |
+
+
+
diff --git a/lib/docs/ResponseContentPartAddedEvent.md b/lib/docs/ResponseContentPartAddedEvent.md
new file mode 100644
index 00000000..14d4de16
--- /dev/null
+++ b/lib/docs/ResponseContentPartAddedEvent.md
@@ -0,0 +1,21 @@
+
+# ResponseContentPartAddedEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.content_part.added`. | |
+| **itemId** | **kotlin.String** | The ID of the output item that the content part was added to. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the content part was added to. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part that was added. | |
+| **part** | [**OutputContent**](OutputContent.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.content_part.added |
+
+
+
diff --git a/lib/docs/ResponseContentPartDoneEvent.md b/lib/docs/ResponseContentPartDoneEvent.md
new file mode 100644
index 00000000..87835b62
--- /dev/null
+++ b/lib/docs/ResponseContentPartDoneEvent.md
@@ -0,0 +1,21 @@
+
+# ResponseContentPartDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.content_part.done`. | |
+| **itemId** | **kotlin.String** | The ID of the output item that the content part was added to. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the content part was added to. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part that is done. | |
+| **part** | [**OutputContent**](OutputContent.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.content_part.done |
+
+
+
diff --git a/lib/docs/ResponseCreatedEvent.md b/lib/docs/ResponseCreatedEvent.md
new file mode 100644
index 00000000..4c763f0a
--- /dev/null
+++ b/lib/docs/ResponseCreatedEvent.md
@@ -0,0 +1,18 @@
+
+# ResponseCreatedEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.created`. | |
+| **response** | [**Response**](Response.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.created |
+
+
+
diff --git a/lib/docs/ResponseError.md b/lib/docs/ResponseError.md
new file mode 100644
index 00000000..a5d180ce
--- /dev/null
+++ b/lib/docs/ResponseError.md
@@ -0,0 +1,11 @@
+
+# ResponseError
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **code** | [**ResponseErrorCode**](ResponseErrorCode.md) | | |
+| **message** | **kotlin.String** | A human-readable description of the error. | |
+
+
+
diff --git a/lib/docs/ResponseErrorCode.md b/lib/docs/ResponseErrorCode.md
new file mode 100644
index 00000000..5bc38c88
--- /dev/null
+++ b/lib/docs/ResponseErrorCode.md
@@ -0,0 +1,44 @@
+
+# ResponseErrorCode
+
+## Enum
+
+
+ * `server_error` (value: `"server_error"`)
+
+ * `rate_limit_exceeded` (value: `"rate_limit_exceeded"`)
+
+ * `invalid_prompt` (value: `"invalid_prompt"`)
+
+ * `vector_store_timeout` (value: `"vector_store_timeout"`)
+
+ * `invalid_image` (value: `"invalid_image"`)
+
+ * `invalid_image_format` (value: `"invalid_image_format"`)
+
+ * `invalid_base64_image` (value: `"invalid_base64_image"`)
+
+ * `invalid_image_url` (value: `"invalid_image_url"`)
+
+ * `image_too_large` (value: `"image_too_large"`)
+
+ * `image_too_small` (value: `"image_too_small"`)
+
+ * `image_parse_error` (value: `"image_parse_error"`)
+
+ * `image_content_policy_violation` (value: `"image_content_policy_violation"`)
+
+ * `invalid_image_mode` (value: `"invalid_image_mode"`)
+
+ * `image_file_too_large` (value: `"image_file_too_large"`)
+
+ * `unsupported_image_media_type` (value: `"unsupported_image_media_type"`)
+
+ * `empty_image_file` (value: `"empty_image_file"`)
+
+ * `failed_to_download_image` (value: `"failed_to_download_image"`)
+
+ * `image_file_not_found` (value: `"image_file_not_found"`)
+
+
+
diff --git a/lib/docs/ResponseErrorEvent.md b/lib/docs/ResponseErrorEvent.md
new file mode 100644
index 00000000..a47ae3c5
--- /dev/null
+++ b/lib/docs/ResponseErrorEvent.md
@@ -0,0 +1,20 @@
+
+# ResponseErrorEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `error`. | |
+| **code** | **kotlin.String** | The error code. | |
+| **message** | **kotlin.String** | The error message. | |
+| **`param`** | **kotlin.String** | The error parameter. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | error |
+
+
+
diff --git a/lib/docs/ResponseFailedEvent.md b/lib/docs/ResponseFailedEvent.md
new file mode 100644
index 00000000..6ad4b06f
--- /dev/null
+++ b/lib/docs/ResponseFailedEvent.md
@@ -0,0 +1,18 @@
+
+# ResponseFailedEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.failed`. | |
+| **response** | [**Response**](Response.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.failed |
+
+
+
diff --git a/lib/docs/ResponseFileSearchCallCompletedEvent.md b/lib/docs/ResponseFileSearchCallCompletedEvent.md
new file mode 100644
index 00000000..9bafd849
--- /dev/null
+++ b/lib/docs/ResponseFileSearchCallCompletedEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseFileSearchCallCompletedEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.file_search_call.completed`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item in which the file search call was initiated. | |
+| **itemId** | **kotlin.String** | The ID of the output item in which the file search call was initiated. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.file_search_call.completed |
+
+
+
diff --git a/lib/docs/ResponseFileSearchCallInProgressEvent.md b/lib/docs/ResponseFileSearchCallInProgressEvent.md
new file mode 100644
index 00000000..d13fec98
--- /dev/null
+++ b/lib/docs/ResponseFileSearchCallInProgressEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseFileSearchCallInProgressEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.file_search_call.in_progress`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item in which the file search call was initiated. | |
+| **itemId** | **kotlin.String** | The ID of the output item in which the file search call was initiated. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.file_search_call.in_progress |
+
+
+
diff --git a/lib/docs/ResponseFileSearchCallSearchingEvent.md b/lib/docs/ResponseFileSearchCallSearchingEvent.md
new file mode 100644
index 00000000..cdb6f786
--- /dev/null
+++ b/lib/docs/ResponseFileSearchCallSearchingEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseFileSearchCallSearchingEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.file_search_call.searching`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item in which the file search call is searching. | |
+| **itemId** | **kotlin.String** | The ID of the output item in which the file search call was initiated. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.file_search_call.searching |
+
+
+
diff --git a/lib/docs/ResponseFormatJsonObject.md b/lib/docs/ResponseFormatJsonObject.md
index 9a3ac132..b3e1874f 100644
--- a/lib/docs/ResponseFormatJsonObject.md
+++ b/lib/docs/ResponseFormatJsonObject.md
@@ -4,7 +4,7 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **type** | [**inline**](#Type) | The type of response format being defined: `json_object` | |
+| **type** | [**inline**](#Type) | The type of response format being defined. Always `json_object`. | |
diff --git a/lib/docs/ResponseFormatJsonSchema.md b/lib/docs/ResponseFormatJsonSchema.md
index cb071949..47f2835a 100644
--- a/lib/docs/ResponseFormatJsonSchema.md
+++ b/lib/docs/ResponseFormatJsonSchema.md
@@ -4,8 +4,8 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **type** | [**inline**](#Type) | The type of response format being defined: `json_schema` | |
-| **jsonSchema** | [**ResponseFormatJsonSchemaJsonSchema**](ResponseFormatJsonSchemaJsonSchema.md) | | |
+| **type** | [**inline**](#Type) | The type of response format being defined. Always `json_schema`. | |
+| **jsonSchema** | [**JSONSchema**](JSONSchema.md) | | |
diff --git a/lib/docs/ResponseFormatText.md b/lib/docs/ResponseFormatText.md
index 75952919..73233408 100644
--- a/lib/docs/ResponseFormatText.md
+++ b/lib/docs/ResponseFormatText.md
@@ -4,7 +4,7 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **type** | [**inline**](#Type) | The type of response format being defined: `text` | |
+| **type** | [**inline**](#Type) | The type of response format being defined. Always `text`. | |
diff --git a/lib/docs/ResponseFunctionCallArgumentsDeltaEvent.md b/lib/docs/ResponseFunctionCallArgumentsDeltaEvent.md
new file mode 100644
index 00000000..b06ecf55
--- /dev/null
+++ b/lib/docs/ResponseFunctionCallArgumentsDeltaEvent.md
@@ -0,0 +1,20 @@
+
+# ResponseFunctionCallArgumentsDeltaEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.function_call_arguments.delta`. | |
+| **itemId** | **kotlin.String** | The ID of the output item that the function-call arguments delta is added to. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the function-call arguments delta is added to. | |
+| **delta** | **kotlin.String** | The function-call arguments delta that is added. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.function_call_arguments.delta |
+
+
+
diff --git a/lib/docs/ResponseFunctionCallArgumentsDoneEvent.md b/lib/docs/ResponseFunctionCallArgumentsDoneEvent.md
new file mode 100644
index 00000000..8664585f
--- /dev/null
+++ b/lib/docs/ResponseFunctionCallArgumentsDoneEvent.md
@@ -0,0 +1,20 @@
+
+# ResponseFunctionCallArgumentsDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.function_call_arguments.done`. | |
+| **itemId** | **kotlin.String** | The ID of the item. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item. | |
+| **arguments** | **kotlin.String** | The function-call arguments. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.function_call_arguments.done |
+
+
+
diff --git a/lib/docs/ResponseInProgressEvent.md b/lib/docs/ResponseInProgressEvent.md
new file mode 100644
index 00000000..03c851ff
--- /dev/null
+++ b/lib/docs/ResponseInProgressEvent.md
@@ -0,0 +1,18 @@
+
+# ResponseInProgressEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.in_progress`. | |
+| **response** | [**Response**](Response.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.in_progress |
+
+
+
diff --git a/lib/docs/ResponseIncompleteEvent.md b/lib/docs/ResponseIncompleteEvent.md
new file mode 100644
index 00000000..19815de5
--- /dev/null
+++ b/lib/docs/ResponseIncompleteEvent.md
@@ -0,0 +1,18 @@
+
+# ResponseIncompleteEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.incomplete`. | |
+| **response** | [**Response**](Response.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.incomplete |
+
+
+
diff --git a/lib/docs/ResponseItemList.md b/lib/docs/ResponseItemList.md
new file mode 100644
index 00000000..3eef48c4
--- /dev/null
+++ b/lib/docs/ResponseItemList.md
@@ -0,0 +1,21 @@
+
+# ResponseItemList
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **`object`** | [**inline**](#`Object`) | The type of object returned, must be `list`. | |
+| **`data`** | [**kotlin.collections.List<ItemResource>**](ItemResource.md) | A list of items used to generate this response. | |
+| **hasMore** | **kotlin.Boolean** | Whether there are more items available. | |
+| **firstId** | **kotlin.String** | The ID of the first item in the list. | |
+| **lastId** | **kotlin.String** | The ID of the last item in the list. | |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | list |
+
+
+
diff --git a/lib/docs/ResponseOutputItemAddedEvent.md b/lib/docs/ResponseOutputItemAddedEvent.md
new file mode 100644
index 00000000..f50e7647
--- /dev/null
+++ b/lib/docs/ResponseOutputItemAddedEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseOutputItemAddedEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.output_item.added`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that was added. | |
+| **item** | [**OutputItem**](OutputItem.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.output_item.added |
+
+
+
diff --git a/lib/docs/ResponseOutputItemDoneEvent.md b/lib/docs/ResponseOutputItemDoneEvent.md
new file mode 100644
index 00000000..e5ca0e26
--- /dev/null
+++ b/lib/docs/ResponseOutputItemDoneEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseOutputItemDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.output_item.done`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that was marked done. | |
+| **item** | [**OutputItem**](OutputItem.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.output_item.done |
+
+
+
diff --git a/lib/docs/ResponseProperties.md b/lib/docs/ResponseProperties.md
new file mode 100644
index 00000000..8327e33b
--- /dev/null
+++ b/lib/docs/ResponseProperties.md
@@ -0,0 +1,25 @@
+
+# ResponseProperties
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **previousResponseId** | **kotlin.String** | The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](/docs/guides/conversation-state). | [optional] |
+| **model** | [**ModelIdsResponses**](ModelIdsResponses.md) | | [optional] |
+| **reasoning** | [**Reasoning**](Reasoning.md) | | [optional] |
+| **maxOutputTokens** | **kotlin.Int** | An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). | [optional] |
+| **instructions** | **kotlin.String** | Inserts a system (or developer) message as the first item in the model's context. When used along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to swap out system (or developer) messages in new responses. | [optional] |
+| **text** | [**ResponsePropertiesText**](ResponsePropertiesText.md) | | [optional] |
+| **tools** | [**kotlin.collections.List<Tool>**](Tool.md) | An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. The two categories of tools you can provide the model are: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like [web search](/docs/guides/tools-web-search) or [file search](/docs/guides/tools-file-search). Learn more about [built-in tools](/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling the model to call your own code. Learn more about [function calling](/docs/guides/function-calling). | [optional] |
+| **toolChoice** | [**ResponsePropertiesToolChoice**](ResponsePropertiesToolChoice.md) | | [optional] |
+| **truncation** | [**inline**](#Truncation) | The truncation strategy to use for the model response. - `auto`: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation. - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. | [optional] |
+
+
+
+## Enum: truncation
+| Name | Value |
+| ---- | ----- |
+| truncation | auto, disabled |
+
+
+
diff --git a/lib/docs/ResponsePropertiesText.md b/lib/docs/ResponsePropertiesText.md
new file mode 100644
index 00000000..c219334b
--- /dev/null
+++ b/lib/docs/ResponsePropertiesText.md
@@ -0,0 +1,10 @@
+
+# ResponsePropertiesText
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **format** | [**TextResponseFormatConfiguration**](TextResponseFormatConfiguration.md) | | [optional] |
+
+
+
diff --git a/lib/docs/ResponsePropertiesToolChoice.md b/lib/docs/ResponsePropertiesToolChoice.md
new file mode 100644
index 00000000..6f48915e
--- /dev/null
+++ b/lib/docs/ResponsePropertiesToolChoice.md
@@ -0,0 +1,18 @@
+
+# ResponsePropertiesToolChoice
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of hosted tool the model should use. Learn more about [built-in tools](/docs/guides/tools). Allowed values are: - `file_search` - `web_search_preview` - `computer_use_preview` | |
+| **name** | **kotlin.String** | The name of the function to call. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file_search, web_search_preview, computer_use_preview, web_search_preview_2025_03_11, function |
+
+
+
diff --git a/lib/docs/ResponseRefusalDeltaEvent.md b/lib/docs/ResponseRefusalDeltaEvent.md
new file mode 100644
index 00000000..095f9269
--- /dev/null
+++ b/lib/docs/ResponseRefusalDeltaEvent.md
@@ -0,0 +1,21 @@
+
+# ResponseRefusalDeltaEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.refusal.delta`. | |
+| **itemId** | **kotlin.String** | The ID of the output item that the refusal text is added to. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the refusal text is added to. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part that the refusal text is added to. | |
+| **delta** | **kotlin.String** | The refusal text that is added. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.refusal.delta |
+
+
+
diff --git a/lib/docs/ResponseRefusalDoneEvent.md b/lib/docs/ResponseRefusalDoneEvent.md
new file mode 100644
index 00000000..cec7a0cf
--- /dev/null
+++ b/lib/docs/ResponseRefusalDoneEvent.md
@@ -0,0 +1,21 @@
+
+# ResponseRefusalDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.refusal.done`. | |
+| **itemId** | **kotlin.String** | The ID of the output item in which the refusal text is finalized. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item in which the refusal text is finalized. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part in which the refusal text is finalized. | |
+| **refusal** | **kotlin.String** | The refusal text that is finalized. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.refusal.done |
+
+
+
diff --git a/lib/docs/ResponseStreamEvent.md b/lib/docs/ResponseStreamEvent.md
new file mode 100644
index 00000000..698b033b
--- /dev/null
+++ b/lib/docs/ResponseStreamEvent.md
@@ -0,0 +1,33 @@
+
+# ResponseStreamEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. One of the values listed in the enum below. | |
+| **delta** | **kotlin.String** | The text delta that was added. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the web search call is associated with. | |
+| **code** | **kotlin.String** | The error code. | |
+| **codeInterpreterCall** | [**CodeInterpreterToolCall**](CodeInterpreterToolCall.md) | | |
+| **response** | [**Response**](Response.md) | | |
+| **itemId** | **kotlin.String** | Unique ID for the output item associated with the web search call. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part in which the text content is finalized. | |
+| **part** | [**OutputContent**](OutputContent.md) | | |
+| **message** | **kotlin.String** | The error message. | |
+| **`param`** | **kotlin.String** | The error parameter. | |
+| **arguments** | **kotlin.String** | The function-call arguments. | |
+| **item** | [**OutputItem**](OutputItem.md) | | |
+| **refusal** | **kotlin.String** | The refusal text that is finalized. | |
+| **annotationIndex** | **kotlin.Int** | The index of the annotation that was added. | |
+| **`annotation`** | [**Annotation**](Annotation.md) | | |
+| **text** | **kotlin.String** | The text content that is finalized. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.audio.delta, response.audio.done, response.audio.transcript.delta, response.audio.transcript.done, response.code_interpreter_call.code.delta, response.code_interpreter_call.code.done, response.code_interpreter_call.completed, response.code_interpreter_call.in_progress, response.code_interpreter_call.interpreting, response.completed, response.content_part.added, response.content_part.done, response.created, error, response.file_search_call.completed, response.file_search_call.in_progress, response.file_search_call.searching, response.function_call_arguments.delta, response.function_call_arguments.done, response.in_progress, response.failed, response.incomplete, response.output_item.added, response.output_item.done, response.refusal.delta, response.refusal.done, response.output_text.annotation.added, response.output_text.delta, response.output_text.done, response.web_search_call.completed, response.web_search_call.in_progress, response.web_search_call.searching |
+
+
+
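`ResponseStreamEvent` is the generator's flattened union of every streaming event above, so each field is only meaningful for its matching event types. One way to consume the stream is to route on the raw `type` value of each server-sent event and then decode the payload with the event-specific model documented in this diff. The sketch below is illustrative only; the parsing calls are left as comments because they depend on whichever JSON/Moshi setup is used, and the event names come straight from the enum table above.

```kotlin
// Illustrative sketch, not generated code: route raw SSE events by their `type`
// string and decode each payload with the matching event model from these docs.
fun dispatch(type: String, json: String) {
    when (type) {
        "response.created"           -> { /* decode ResponseCreatedEvent */ }
        "response.output_text.delta" -> { /* decode ResponseTextDeltaEvent, append delta */ }
        "response.output_text.done"  -> { /* decode ResponseTextDoneEvent */ }
        "error"                      -> { /* decode ResponseErrorEvent, surface code/message */ }
        else                         -> { /* ignore event types this client does not handle */ }
    }
}
```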
diff --git a/lib/docs/ResponseTextAnnotationDeltaEvent.md b/lib/docs/ResponseTextAnnotationDeltaEvent.md
new file mode 100644
index 00000000..4951acee
--- /dev/null
+++ b/lib/docs/ResponseTextAnnotationDeltaEvent.md
@@ -0,0 +1,22 @@
+
+# ResponseTextAnnotationDeltaEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.output_text.annotation.added`. | |
+| **itemId** | **kotlin.String** | The ID of the output item that the text annotation was added to. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the text annotation was added to. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part that the text annotation was added to. | |
+| **annotationIndex** | **kotlin.Int** | The index of the annotation that was added. | |
+| **`annotation`** | [**Annotation**](Annotation.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.output_text.annotation.added |
+
+
+
diff --git a/lib/docs/ResponseTextDeltaEvent.md b/lib/docs/ResponseTextDeltaEvent.md
new file mode 100644
index 00000000..7700f68b
--- /dev/null
+++ b/lib/docs/ResponseTextDeltaEvent.md
@@ -0,0 +1,21 @@
+
+# ResponseTextDeltaEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.output_text.delta`. | |
+| **itemId** | **kotlin.String** | The ID of the output item that the text delta was added to. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the text delta was added to. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part that the text delta was added to. | |
+| **delta** | **kotlin.String** | The text delta that was added. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.output_text.delta |
+
+
+
diff --git a/lib/docs/ResponseTextDoneEvent.md b/lib/docs/ResponseTextDoneEvent.md
new file mode 100644
index 00000000..24732762
--- /dev/null
+++ b/lib/docs/ResponseTextDoneEvent.md
@@ -0,0 +1,21 @@
+
+# ResponseTextDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.output_text.done`. | |
+| **itemId** | **kotlin.String** | The ID of the output item in which the text content is finalized. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item in which the text content is finalized. | |
+| **contentIndex** | **kotlin.Int** | The index of the content part in which the text content is finalized. | |
+| **text** | **kotlin.String** | The text content that is finalized. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.output_text.done |
+
+
+
diff --git a/lib/docs/ResponseUsage.md b/lib/docs/ResponseUsage.md
new file mode 100644
index 00000000..579cc002
--- /dev/null
+++ b/lib/docs/ResponseUsage.md
@@ -0,0 +1,14 @@
+
+# ResponseUsage
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **inputTokens** | **kotlin.Int** | The number of input tokens. | |
+| **inputTokensDetails** | [**ResponseUsageInputTokensDetails**](ResponseUsageInputTokensDetails.md) | | |
+| **outputTokens** | **kotlin.Int** | The number of output tokens. | |
+| **outputTokensDetails** | [**ResponseUsageOutputTokensDetails**](ResponseUsageOutputTokensDetails.md) | | |
+| **totalTokens** | **kotlin.Int** | The total number of tokens used. | |
+
+
+
diff --git a/lib/docs/ResponseUsageInputTokensDetails.md b/lib/docs/ResponseUsageInputTokensDetails.md
new file mode 100644
index 00000000..67dab8dc
--- /dev/null
+++ b/lib/docs/ResponseUsageInputTokensDetails.md
@@ -0,0 +1,10 @@
+
+# ResponseUsageInputTokensDetails
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **cachedTokens** | **kotlin.Int** | The number of tokens that were retrieved from the cache. [More on prompt caching](/docs/guides/prompt-caching). | |
+
+
+
diff --git a/lib/docs/ResponseUsageOutputTokensDetails.md b/lib/docs/ResponseUsageOutputTokensDetails.md
new file mode 100644
index 00000000..309cabb4
--- /dev/null
+++ b/lib/docs/ResponseUsageOutputTokensDetails.md
@@ -0,0 +1,10 @@
+
+# ResponseUsageOutputTokensDetails
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **reasoningTokens** | **kotlin.Int** | The number of reasoning tokens. | |
+
+
+
diff --git a/lib/docs/ResponseWebSearchCallCompletedEvent.md b/lib/docs/ResponseWebSearchCallCompletedEvent.md
new file mode 100644
index 00000000..8cd51ce0
--- /dev/null
+++ b/lib/docs/ResponseWebSearchCallCompletedEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseWebSearchCallCompletedEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.web_search_call.completed`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the web search call is associated with. | |
+| **itemId** | **kotlin.String** | Unique ID for the output item associated with the web search call. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.web_search_call.completed |
+
+
+
diff --git a/lib/docs/ResponseWebSearchCallInProgressEvent.md b/lib/docs/ResponseWebSearchCallInProgressEvent.md
new file mode 100644
index 00000000..180fb240
--- /dev/null
+++ b/lib/docs/ResponseWebSearchCallInProgressEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseWebSearchCallInProgressEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.web_search_call.in_progress`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the web search call is associated with. | |
+| **itemId** | **kotlin.String** | Unique ID for the output item associated with the web search call. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.web_search_call.in_progress |
+
+
+
diff --git a/lib/docs/ResponseWebSearchCallSearchingEvent.md b/lib/docs/ResponseWebSearchCallSearchingEvent.md
new file mode 100644
index 00000000..f7a72bce
--- /dev/null
+++ b/lib/docs/ResponseWebSearchCallSearchingEvent.md
@@ -0,0 +1,19 @@
+
+# ResponseWebSearchCallSearchingEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `response.web_search_call.searching`. | |
+| **outputIndex** | **kotlin.Int** | The index of the output item that the web search call is associated with. | |
+| **itemId** | **kotlin.String** | Unique ID for the output item associated with the web search call. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | response.web_search_call.searching |
+
+
+
diff --git a/lib/docs/ResponsesApi.md b/lib/docs/ResponsesApi.md
new file mode 100644
index 00000000..bd5e6c41
--- /dev/null
+++ b/lib/docs/ResponsesApi.md
@@ -0,0 +1,205 @@
+# ResponsesApi
+
+All URIs are relative to *https://api.openai.com/v1*
+
+| Method | HTTP request | Description |
+| ------------- | ------------- | ------------- |
+| [**createResponse**](ResponsesApi.md#createResponse) | **POST** /responses | Creates a model response. Provide [text](/docs/guides/text) or [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own [custom code](/docs/guides/function-calling) or use built-in [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or [file search](/docs/guides/tools-file-search) to use your own data as input for the model's response. |
+| [**deleteResponse**](ResponsesApi.md#deleteResponse) | **DELETE** /responses/{response_id} | Deletes a model response with the given ID. |
+| [**getResponse**](ResponsesApi.md#getResponse) | **GET** /responses/{response_id} | Retrieves a model response with the given ID. |
+| [**listInputItems**](ResponsesApi.md#listInputItems) | **GET** /responses/{response_id}/input_items | Returns a list of input items for a given response. |
+
+
+
+# **createResponse**
+> Response createResponse(createResponse)
+
+Creates a model response. Provide [text](/docs/guides/text) or [image](/docs/guides/images) inputs to generate [text](/docs/guides/text) or [JSON](/docs/guides/structured-outputs) outputs. Have the model call your own [custom code](/docs/guides/function-calling) or use built-in [tools](/docs/guides/tools) like [web search](/docs/guides/tools-web-search) or [file search](/docs/guides/tools-file-search) to use your own data as input for the model's response.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ResponsesApi()
+val createResponse : CreateResponse = // CreateResponse |
+try {
+ val result : Response = apiInstance.createResponse(createResponse)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling ResponsesApi#createResponse")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ResponsesApi#createResponse")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **createResponse** | [**CreateResponse**](CreateResponse.md)| | |
+
+### Return type
+
+[**Response**](Response.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+
+# **deleteResponse**
+> deleteResponse(responseId)
+
+Deletes a model response with the given ID.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ResponsesApi()
+val responseId : kotlin.String = "resp_677efb5139a88190b512bc3fef8e535d" // kotlin.String | The ID of the response to delete.
+try {
+ apiInstance.deleteResponse(responseId)
+} catch (e: ClientException) {
+ println("4xx response calling ResponsesApi#deleteResponse")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ResponsesApi#deleteResponse")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **responseId** | **kotlin.String**| The ID of the response to delete. | |
+
+### Return type
+
+null (empty response body)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+
+# **getResponse**
+> Response getResponse(responseId, include)
+
+Retrieves a model response with the given ID.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ResponsesApi()
+val responseId : kotlin.String = "resp_677efb5139a88190b512bc3fef8e535d" // kotlin.String | The ID of the response to retrieve.
+val include : kotlin.collections.List<Includable> = listOf() // kotlin.collections.List<Includable> | Specify additional output data to include in the response. Currently supported values are: - `file_search_call.results`: Include the search results of the file search tool call. - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output.
+try {
+ val result : Response = apiInstance.getResponse(responseId, include)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling ResponsesApi#getResponse")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ResponsesApi#getResponse")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **responseId** | **kotlin.String**| The ID of the response to retrieve. | |
+| **include** | [**kotlin.collections.List&lt;Includable&gt;**](Includable.md)| Specify additional output data to include in the response. Currently supported values are: - `file_search_call.results`: Include the search results of the file search tool call. - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. | [optional] |
+
+### Return type
+
+[**Response**](Response.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+
+# **listInputItems**
+> ResponseItemList listInputItems(responseId, limit, order, after, before)
+
+Returns a list of input items for a given response.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = ResponsesApi()
+val responseId : kotlin.String = "responseId_example" // kotlin.String | The ID of the response to retrieve input items for.
+val limit : kotlin.Int = 56 // kotlin.Int | A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
+val order : kotlin.String = "asc" // kotlin.String | The order to return the input items in. Default is `asc`. - `asc`: Return the input items in ascending order. - `desc`: Return the input items in descending order.
+val after : kotlin.String = "after_example" // kotlin.String | An item ID to list items after, used in pagination.
+val before : kotlin.String = "before_example" // kotlin.String | An item ID to list items before, used in pagination.
+try {
+ val result : ResponseItemList = apiInstance.listInputItems(responseId, limit, order, after, before)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling ResponsesApi#listInputItems")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling ResponsesApi#listInputItems")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **responseId** | **kotlin.String**| The ID of the response to retrieve input items for. | |
+| **limit** | **kotlin.Int**| A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. | [optional] [default to 20] |
+| **order** | **kotlin.String**| The order to return the input items in. Default is `asc`. - `asc`: Return the input items in ascending order. - `desc`: Return the input items in descending order. | [optional] [enum: asc, desc] |
+| **after** | **kotlin.String**| An item ID to list items after, used in pagination. | [optional] |
+| **before** | **kotlin.String**| An item ID to list items before, used in pagination. | [optional] |
+
+### Return type
+
+[**ResponseItemList**](ResponseItemList.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
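The `hasMore` and `lastId` fields on `ResponseItemList` make `listInputItems` straightforward to page through. A rough sketch follows, assuming the generated method accepts `null` for the optional `after`/`before` parameters; the actual generated signature may use default arguments instead.

```kotlin
// Rough sketch, assuming the generated listInputItems signature accepts null for the
// optional after/before parameters; pages through every input item of one response.
fun listAllInputItems(api: ResponsesApi, responseId: String): List<ItemResource> {
    val items = mutableListOf<ItemResource>()
    var after: String? = null
    do {
        // limit may be 1..100 per the parameter table above; order defaults to "asc"
        val page = api.listInputItems(responseId, 100, "asc", after, null)
        items += page.data
        after = page.lastId
    } while (page.hasMore)
    return items
}
```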
diff --git a/lib/docs/RunObject.md b/lib/docs/RunObject.md
index 851e729f..60c5a1ec 100644
--- a/lib/docs/RunObject.md
+++ b/lib/docs/RunObject.md
@@ -28,7 +28,7 @@
| **truncationStrategy** | [**CreateRunRequestTruncationStrategy**](CreateRunRequestTruncationStrategy.md) | | |
| **toolChoice** | [**CreateRunRequestToolChoice**](CreateRunRequestToolChoice.md) | | |
| **parallelToolCalls** | **kotlin.Boolean** | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. | |
-| **responseFormat** | [**AssistantObjectResponseFormat**](AssistantObjectResponseFormat.md) | | |
+| **responseFormat** | [**AssistantsApiResponseFormatOption**](AssistantsApiResponseFormatOption.md) | | |
| **temperature** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The sampling temperature used for this run. If not set, defaults to 1. | [optional] |
| **topP** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The nucleus sampling value used for this run. If not set, defaults to 1. | [optional] |
diff --git a/lib/docs/RunStepDetailsToolCallsFileSearchRankingOptionsObject.md b/lib/docs/RunStepDetailsToolCallsFileSearchRankingOptionsObject.md
index 4466e568..71bb8c90 100644
--- a/lib/docs/RunStepDetailsToolCallsFileSearchRankingOptionsObject.md
+++ b/lib/docs/RunStepDetailsToolCallsFileSearchRankingOptionsObject.md
@@ -4,15 +4,8 @@
## Properties
| Name | Type | Description | Notes |
| ------------ | ------------- | ------------- | ------------- |
-| **ranker** | [**inline**](#Ranker) | The ranker used for the file search. | |
+| **ranker** | [**FileSearchRanker**](FileSearchRanker.md) | | |
| **scoreThreshold** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The score threshold for the file search. All values must be a floating point number between 0 and 1. | |
-
-## Enum: ranker
-| Name | Value |
-| ---- | ----- |
-| ranker | default_2024_08_21 |
-
-
diff --git a/lib/docs/Screenshot.md b/lib/docs/Screenshot.md
new file mode 100644
index 00000000..8d21936e
--- /dev/null
+++ b/lib/docs/Screenshot.md
@@ -0,0 +1,17 @@
+
+# Screenshot
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a screenshot action, this property is always set to `screenshot`. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | screenshot |
+
+
+
diff --git a/lib/docs/Scroll.md b/lib/docs/Scroll.md
new file mode 100644
index 00000000..234047db
--- /dev/null
+++ b/lib/docs/Scroll.md
@@ -0,0 +1,21 @@
+
+# Scroll
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a scroll action, this property is always set to `scroll`. | |
+| **x** | **kotlin.Int** | The x-coordinate where the scroll occurred. | |
+| **y** | **kotlin.Int** | The y-coordinate where the scroll occurred. | |
+| **scrollX** | **kotlin.Int** | The horizontal scroll distance. | |
+| **scrollY** | **kotlin.Int** | The vertical scroll distance. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | scroll |
+
+
+
diff --git a/lib/docs/StopConfiguration.md b/lib/docs/StopConfiguration.md
new file mode 100644
index 00000000..fdcffa9d
--- /dev/null
+++ b/lib/docs/StopConfiguration.md
@@ -0,0 +1,9 @@
+
+# StopConfiguration
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+
+
+
diff --git a/lib/docs/TextResponseFormatConfiguration.md b/lib/docs/TextResponseFormatConfiguration.md
new file mode 100644
index 00000000..33d3fbff
--- /dev/null
+++ b/lib/docs/TextResponseFormatConfiguration.md
@@ -0,0 +1,21 @@
+
+# TextResponseFormatConfiguration
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of response format being defined. Always `text`. | |
+| **schema** | [**kotlin.collections.Map<kotlin.String, kotlin.Any>**](kotlin.Any.md) | The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas [here](https://json-schema.org/). | |
+| **description** | **kotlin.String** | A description of what the response format is for, used by the model to determine how to respond in the format. | [optional] |
+| **name** | **kotlin.String** | The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | [optional] |
+| **strict** | **kotlin.Boolean** | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | text, json_schema, json_object |
+
+
+
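Because `schema` is a plain `kotlin.collections.Map<kotlin.String, kotlin.Any>`, a structured-output text format can be described with ordinary Kotlin collections. The constructor parameter and enum constant spellings below are assumptions about the generated `TextResponseFormatConfiguration` class and may not match the generated code exactly.

```kotlin
// Assumption-laden sketch: parameter names mirror the property table above, but the
// generated constructor/enum spellings may differ from what is shown here.
val format = TextResponseFormatConfiguration(
    type = TextResponseFormatConfiguration.Type.json_schema, // assumed enum constant name
    name = "weather_report",
    strict = true,
    schema = mapOf(
        "type" to "object",
        "properties" to mapOf("temperature" to mapOf("type" to "number")),
        "required" to listOf("temperature"),
        "additionalProperties" to false
    )
)
```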
diff --git a/lib/docs/TextResponseFormatJsonSchema.md b/lib/docs/TextResponseFormatJsonSchema.md
new file mode 100644
index 00000000..cd675430
--- /dev/null
+++ b/lib/docs/TextResponseFormatJsonSchema.md
@@ -0,0 +1,21 @@
+
+# TextResponseFormatJsonSchema
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of response format being defined. Always `json_schema`. | |
+| **schema** | [**kotlin.collections.Map<kotlin.String, kotlin.Any>**](kotlin.Any.md) | The schema for the response format, described as a JSON Schema object. Learn how to build JSON schemas [here](https://json-schema.org/). | |
+| **description** | **kotlin.String** | A description of what the response format is for, used by the model to determine how to respond in the format. | [optional] |
+| **name** | **kotlin.String** | The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | [optional] |
+| **strict** | **kotlin.Boolean** | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | json_schema |
+
+
+
diff --git a/lib/docs/Tool.md b/lib/docs/Tool.md
new file mode 100644
index 00000000..b839098c
--- /dev/null
+++ b/lib/docs/Tool.md
@@ -0,0 +1,37 @@
+
+# Tool
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the file search tool. Always `file_search`. | |
+| **vectorStoreIds** | **kotlin.collections.List<kotlin.String>** | The IDs of the vector stores to search. | |
+| **name** | **kotlin.String** | The name of the function to call. | |
+| **parameters** | [**kotlin.collections.Map<kotlin.String, kotlin.Any>**](kotlin.Any.md) | A JSON schema object describing the parameters of the function. | |
+| **strict** | **kotlin.Boolean** | Whether to enforce strict parameter validation. Default `true`. | |
+| **displayWidth** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The width of the computer display. | |
+| **displayHeight** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The height of the computer display. | |
+| **environment** | [**inline**](#Environment) | The type of computer environment to control. | |
+| **maxNumResults** | **kotlin.Int** | The maximum number of results to return. This number should be between 1 and 50 inclusive. | [optional] |
+| **filters** | [**FileSearchToolFilters**](FileSearchToolFilters.md) | | [optional] |
+| **rankingOptions** | [**FileSearchToolRankingOptions**](FileSearchToolRankingOptions.md) | | [optional] |
+| **description** | **kotlin.String** | A description of the function. Used by the model to determine whether or not to call the function. | [optional] |
+| **userLocation** | [**WebSearchToolUserLocation**](WebSearchToolUserLocation.md) | | [optional] |
+| **searchContextSize** | [**WebSearchContextSize**](WebSearchContextSize.md) | | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file_search, function, computer_use_preview, web_search_preview, web_search_preview_2025_03_11 |
+
+
+
+## Enum: environment
+| Name | Value |
+| ---- | ----- |
+| environment | mac, windows, ubuntu, browser |
+
+
+
diff --git a/lib/docs/ToolChoiceFunction.md b/lib/docs/ToolChoiceFunction.md
new file mode 100644
index 00000000..eee3df36
--- /dev/null
+++ b/lib/docs/ToolChoiceFunction.md
@@ -0,0 +1,18 @@
+
+# ToolChoiceFunction
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | For function calling, the type is always `function`. | |
+| **name** | **kotlin.String** | The name of the function to call. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | function |
+
+
+
diff --git a/lib/docs/ToolChoiceOptions.md b/lib/docs/ToolChoiceOptions.md
new file mode 100644
index 00000000..2f52a549
--- /dev/null
+++ b/lib/docs/ToolChoiceOptions.md
@@ -0,0 +1,14 @@
+
+# ToolChoiceOptions
+
+## Enum
+
+
+ * `none` (value: `"none"`)
+
+ * `auto` (value: `"auto"`)
+
+ * `required` (value: `"required"`)
+
+
+
diff --git a/lib/docs/ToolChoiceTypes.md b/lib/docs/ToolChoiceTypes.md
new file mode 100644
index 00000000..961167df
--- /dev/null
+++ b/lib/docs/ToolChoiceTypes.md
@@ -0,0 +1,17 @@
+
+# ToolChoiceTypes
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of hosted tool the model should use. Learn more about [built-in tools](/docs/guides/tools). Allowed values are: - `file_search` - `web_search_preview` - `computer_use_preview` | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | file_search, web_search_preview, computer_use_preview, web_search_preview_2025_03_11 |
+
+
+
diff --git a/lib/docs/TranscriptTextDeltaEvent.md b/lib/docs/TranscriptTextDeltaEvent.md
new file mode 100644
index 00000000..eed4900c
--- /dev/null
+++ b/lib/docs/TranscriptTextDeltaEvent.md
@@ -0,0 +1,19 @@
+
+# TranscriptTextDeltaEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `transcript.text.delta`. | |
+| **delta** | **kotlin.String** | The text delta that was additionally transcribed. | |
+| **logprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | The log probabilities of the delta. Only included if you [create a transcription](/docs/api-reference/audio/create-transcription) with the `include[]` parameter set to `logprobs`. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | transcript.text.delta |
+
+
+
diff --git a/lib/docs/TranscriptTextDoneEvent.md b/lib/docs/TranscriptTextDoneEvent.md
new file mode 100644
index 00000000..e1167b62
--- /dev/null
+++ b/lib/docs/TranscriptTextDoneEvent.md
@@ -0,0 +1,19 @@
+
+# TranscriptTextDoneEvent
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the event. Always `transcript.text.done`. | |
+| **text** | **kotlin.String** | The text that was transcribed. | |
+| **logprobs** | [**kotlin.collections.List<LogProbProperties>**](LogProbProperties.md) | The log probabilities of the individual tokens in the transcription. Only included if you [create a transcription](/docs/api-reference/audio/create-transcription) with the `include[]` parameter set to `logprobs`. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | transcript.text.done |
+
+
+
diff --git a/lib/docs/TranscriptionInclude.md b/lib/docs/TranscriptionInclude.md
new file mode 100644
index 00000000..8f336a49
--- /dev/null
+++ b/lib/docs/TranscriptionInclude.md
@@ -0,0 +1,10 @@
+
+# TranscriptionInclude
+
+## Enum
+
+
+ * `logprobs` (value: `"logprobs"`)
+
+
+
diff --git a/lib/docs/Type.md b/lib/docs/Type.md
new file mode 100644
index 00000000..c68e84ba
--- /dev/null
+++ b/lib/docs/Type.md
@@ -0,0 +1,18 @@
+
+# Type
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a type action, this property is always set to `type`. | |
+| **text** | **kotlin.String** | The text to type. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | type |
+
+
+
diff --git a/lib/docs/UpdateChatCompletionRequest.md b/lib/docs/UpdateChatCompletionRequest.md
new file mode 100644
index 00000000..9202c202
--- /dev/null
+++ b/lib/docs/UpdateChatCompletionRequest.md
@@ -0,0 +1,10 @@
+
+# UpdateChatCompletionRequest
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **metadata** | **kotlin.collections.Map<kotlin.String, kotlin.String>** | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. | |
+
+
+
diff --git a/lib/docs/UpdateVectorStoreFileAttributesRequest.md b/lib/docs/UpdateVectorStoreFileAttributesRequest.md
new file mode 100644
index 00000000..318d5e2f
--- /dev/null
+++ b/lib/docs/UpdateVectorStoreFileAttributesRequest.md
@@ -0,0 +1,10 @@
+
+# UpdateVectorStoreFileAttributesRequest
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **attributes** | [**kotlin.collections.Map<kotlin.String, VectorStoreFileAttributesValue>**](VectorStoreFileAttributesValue.md) | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. | |
+
+
+
diff --git a/lib/docs/Upload.md b/lib/docs/Upload.md
index 2d198c83..6e9749e2 100644
--- a/lib/docs/Upload.md
+++ b/lib/docs/Upload.md
@@ -10,7 +10,7 @@
| **bytes** | **kotlin.Int** | The intended number of bytes to be uploaded. | |
| **purpose** | **kotlin.String** | The intended purpose of the file. [Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values. | |
| **status** | [**inline**](#Status) | The status of the Upload. | |
-| **expiresAt** | **kotlin.Int** | The Unix timestamp (in seconds) for when the Upload was created. | |
+| **expiresAt** | **kotlin.Int** | The Unix timestamp (in seconds) for when the Upload will expire. | |
| **`object`** | [**inline**](#`Object`) | The object type, which is always \"upload\". | [optional] |
| **file** | [**UploadFile**](UploadFile.md) | | [optional] |
diff --git a/lib/docs/UploadFile.md b/lib/docs/UploadFile.md
index 2143b66a..c453c0b7 100644
--- a/lib/docs/UploadFile.md
+++ b/lib/docs/UploadFile.md
@@ -11,6 +11,7 @@
| **`object`** | [**inline**](#`Object`) | The object type, which is always `file`. | |
| **purpose** | [**inline**](#Purpose) | The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. | |
| **status** | [**inline**](#Status) | Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. | |
+| **expiresAt** | **kotlin.Int** | The Unix timestamp (in seconds) for when the file will expire. | [optional] |
| **statusDetails** | **kotlin.String** | Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. | [optional] |
diff --git a/lib/docs/UploadsApi.md b/lib/docs/UploadsApi.md
index 8ce0a234..9c1eac2a 100644
--- a/lib/docs/UploadsApi.md
+++ b/lib/docs/UploadsApi.md
@@ -7,7 +7,7 @@ All URIs are relative to *https://api.openai.com/v1*
| [**addUploadPart**](UploadsApi.md#addUploadPart) | **POST** /uploads/{upload_id}/parts | Adds a [Part](/docs/api-reference/uploads/part-object) to an [Upload](/docs/api-reference/uploads/object) object. A Part represents a chunk of bytes from the file you are trying to upload. Each Part can be at most 64 MB, and you can add Parts until you hit the Upload maximum of 8 GB. It is possible to add multiple Parts in parallel. You can decide the intended order of the Parts when you [complete the Upload](/docs/api-reference/uploads/complete). |
| [**cancelUpload**](UploadsApi.md#cancelUpload) | **POST** /uploads/{upload_id}/cancel | Cancels the Upload. No Parts may be added after an Upload is cancelled. |
| [**completeUpload**](UploadsApi.md#completeUpload) | **POST** /uploads/{upload_id}/complete | Completes the [Upload](/docs/api-reference/uploads/object). Within the returned Upload object, there is a nested [File](/docs/api-reference/files/object) object that is ready to use in the rest of the platform. You can specify the order of the Parts by passing in an ordered list of the Part IDs. The number of bytes uploaded upon completion must match the number of bytes initially specified when creating the Upload object. No Parts may be added after an Upload is completed. |
-| [**createUpload**](UploadsApi.md#createUpload) | **POST** /uploads | Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: - [Assistants](/docs/assistants/tools/file-search#supported-files) For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). |
+| [**createUpload**](UploadsApi.md#createUpload) | **POST** /uploads | Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. For certain `purpose` values, the correct `mime_type` must be specified. Please refer to documentation for the [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files). For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create). |
@@ -156,7 +156,7 @@ Configure ApiKeyAuth:
# **createUpload**
> Upload createUpload(createUploadRequest)
-Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: - [Assistants](/docs/assistants/tools/file-search#supported-files) For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).
+Creates an intermediate [Upload](/docs/api-reference/uploads/object) object that you can add [Parts](/docs/api-reference/uploads/part-object) to. Currently, an Upload can accept at most 8 GB in total and expires after an hour after you create it. Once you complete the Upload, we will create a [File](/docs/api-reference/files/object) object that contains all the parts you uploaded. This File is usable in the rest of our platform as a regular File object. For certain `purpose` values, the correct `mime_type` must be specified. Please refer to documentation for the [supported MIME types for your use case](/docs/assistants/tools/file-search#supported-files). For guidance on the proper filename extensions for each purpose, please follow the documentation on [creating a File](/docs/api-reference/files/create).
### Example
```kotlin
diff --git a/lib/docs/UrlCitation.md b/lib/docs/UrlCitation.md
new file mode 100644
index 00000000..2ec0ff3a
--- /dev/null
+++ b/lib/docs/UrlCitation.md
@@ -0,0 +1,21 @@
+
+# UrlCitation
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **url** | **kotlin.String** | The URL of the web resource. | |
+| **title** | **kotlin.String** | The title of the web resource. | |
+| **type** | [**inline**](#Type) | The type of the URL citation. Always `url_citation`. | |
+| **startIndex** | **kotlin.Int** | The index of the first character of the URL citation in the message. | |
+| **endIndex** | **kotlin.Int** | The index of the last character of the URL citation in the message. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | url_citation |
+
+
+
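+## Example
+
+A minimal construction sketch, assuming the generated data class exposes the properties above as constructor parameters and that the `type` enum entry is named `url_citation`:
+
+```kotlin
+import com.openai.models.UrlCitation
+
+// Hypothetical values; the indices are character offsets into the message text.
+val citation = UrlCitation(
+    url = "https://example.com/article",
+    title = "Example article",
+    type = UrlCitation.Type.url_citation,
+    startIndex = 0,
+    endIndex = 27,
+)
+```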
diff --git a/lib/docs/VectorStoreFileAttributesValue.md b/lib/docs/VectorStoreFileAttributesValue.md
new file mode 100644
index 00000000..4f083bd0
--- /dev/null
+++ b/lib/docs/VectorStoreFileAttributesValue.md
@@ -0,0 +1,9 @@
+
+# VectorStoreFileAttributesValue
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+
+
+
diff --git a/lib/docs/VectorStoreFileContentResponse.md b/lib/docs/VectorStoreFileContentResponse.md
new file mode 100644
index 00000000..e483a822
--- /dev/null
+++ b/lib/docs/VectorStoreFileContentResponse.md
@@ -0,0 +1,20 @@
+
+# VectorStoreFileContentResponse
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **`object`** | [**inline**](#`Object`) | The object type, which is always `vector_store.file_content.page` | |
+| **`data`** | [**kotlin.collections.List<VectorStoreFileContentResponseDataInner>**](VectorStoreFileContentResponseDataInner.md) | Parsed content of the file. | |
+| **hasMore** | **kotlin.Boolean** | Indicates if there are more content pages to fetch. | |
+| **nextPage** | **kotlin.String** | The token for the next page, if any. | |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | vector_store.file_content.page |
+
+
+
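+## Example
+
+A sketch of consuming this response via `VectorStoresApi.retrieveVectorStoreFileContent` (see VectorStoresApi.md); the camelCase property names are assumed to match the table above:
+
+```kotlin
+import com.openai.apis.VectorStoresApi
+
+val apiInstance = VectorStoresApi()
+val content = apiInstance.retrieveVectorStoreFileContent("vs_abc123", "file-abc123")
+// Print the parsed text chunks of the file.
+content.data.forEach { chunk ->
+    if (chunk.type == "text") println(chunk.text)
+}
+if (content.hasMore) println("Next page token: ${content.nextPage}")
+```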
diff --git a/lib/docs/VectorStoreFileContentResponseDataInner.md b/lib/docs/VectorStoreFileContentResponseDataInner.md
new file mode 100644
index 00000000..4524c105
--- /dev/null
+++ b/lib/docs/VectorStoreFileContentResponseDataInner.md
@@ -0,0 +1,11 @@
+
+# VectorStoreFileContentResponseDataInner
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | **kotlin.String** | The content type (currently only `\"text\"`) | [optional] |
+| **text** | **kotlin.String** | The text content | [optional] |
+
+
+
diff --git a/lib/docs/VectorStoreFileObject.md b/lib/docs/VectorStoreFileObject.md
index 72140590..a1ed4c56 100644
--- a/lib/docs/VectorStoreFileObject.md
+++ b/lib/docs/VectorStoreFileObject.md
@@ -12,6 +12,7 @@
| **status** | [**inline**](#Status) | The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. | |
| **lastError** | [**VectorStoreFileObjectLastError**](VectorStoreFileObjectLastError.md) | | |
| **chunkingStrategy** | [**VectorStoreFileObjectChunkingStrategy**](VectorStoreFileObjectChunkingStrategy.md) | | [optional] |
+| **attributes** | [**kotlin.collections.Map<kotlin.String, VectorStoreFileAttributesValue>**](VectorStoreFileAttributesValue.md) | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. | [optional] |
diff --git a/lib/docs/VectorStoreSearchRequest.md b/lib/docs/VectorStoreSearchRequest.md
new file mode 100644
index 00000000..f1eba4e8
--- /dev/null
+++ b/lib/docs/VectorStoreSearchRequest.md
@@ -0,0 +1,14 @@
+
+# VectorStoreSearchRequest
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **query** | [**VectorStoreSearchRequestQuery**](VectorStoreSearchRequestQuery.md) | | |
+| **rewriteQuery** | **kotlin.Boolean** | Whether to rewrite the natural language query for vector search. | [optional] |
+| **maxNumResults** | **kotlin.Int** | The maximum number of results to return. This number should be between 1 and 50 inclusive. | [optional] |
+| **filters** | [**FileSearchToolFilters**](FileSearchToolFilters.md) | | [optional] |
+| **rankingOptions** | [**VectorStoreSearchRequestRankingOptions**](VectorStoreSearchRequestRankingOptions.md) | | [optional] |
+
+
+
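+## Example
+
+A hypothetical request. `VectorStoreSearchRequestQuery` is a oneOf (a single query string or a list of strings) with no properties documented above, so its constructor here is an assumption rather than generator output:
+
+```kotlin
+import com.openai.models.VectorStoreSearchRequest
+import com.openai.models.VectorStoreSearchRequestQuery
+
+val request = VectorStoreSearchRequest(
+    query = VectorStoreSearchRequestQuery("What is the return policy?"), // assumed wrapper for a single query string
+    maxNumResults = 10,
+    rewriteQuery = true,
+)
+```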
diff --git a/lib/docs/VectorStoreSearchRequestQuery.md b/lib/docs/VectorStoreSearchRequestQuery.md
new file mode 100644
index 00000000..bf798343
--- /dev/null
+++ b/lib/docs/VectorStoreSearchRequestQuery.md
@@ -0,0 +1,9 @@
+
+# VectorStoreSearchRequestQuery
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+
+
+
diff --git a/lib/docs/VectorStoreSearchRequestRankingOptions.md b/lib/docs/VectorStoreSearchRequestRankingOptions.md
new file mode 100644
index 00000000..d65b6765
--- /dev/null
+++ b/lib/docs/VectorStoreSearchRequestRankingOptions.md
@@ -0,0 +1,18 @@
+
+# VectorStoreSearchRequestRankingOptions
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **ranker** | [**inline**](#Ranker) | | [optional] |
+| **scoreThreshold** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | | [optional] |
+
+
+
+## Enum: ranker
+| Name | Value |
+| ---- | ----- |
+| ranker | auto, default-2024-11-15 |
+
+
+
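+## Example
+
+A sketch; the `Ranker` enum entry name is assumed to mirror the `auto` value above:
+
+```kotlin
+import com.openai.models.VectorStoreSearchRequestRankingOptions
+import java.math.BigDecimal
+
+// Only return results scoring at least 0.5 under the automatic ranker.
+val rankingOptions = VectorStoreSearchRequestRankingOptions(
+    ranker = VectorStoreSearchRequestRankingOptions.Ranker.auto,
+    scoreThreshold = BigDecimal("0.5"),
+)
+```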
diff --git a/lib/docs/VectorStoreSearchResultContentObject.md b/lib/docs/VectorStoreSearchResultContentObject.md
new file mode 100644
index 00000000..a87587d8
--- /dev/null
+++ b/lib/docs/VectorStoreSearchResultContentObject.md
@@ -0,0 +1,18 @@
+
+# VectorStoreSearchResultContentObject
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of content. | |
+| **text** | **kotlin.String** | The text content returned from search. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | text |
+
+
+
diff --git a/lib/docs/VectorStoreSearchResultItem.md b/lib/docs/VectorStoreSearchResultItem.md
new file mode 100644
index 00000000..2f5d2504
--- /dev/null
+++ b/lib/docs/VectorStoreSearchResultItem.md
@@ -0,0 +1,14 @@
+
+# VectorStoreSearchResultItem
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **fileId** | **kotlin.String** | The ID of the vector store file. | |
+| **filename** | **kotlin.String** | The name of the vector store file. | |
+| **score** | [**java.math.BigDecimal**](java.math.BigDecimal.md) | The similarity score for the result. | |
+| **attributes** | [**kotlin.collections.Map<kotlin.String, VectorStoreFileAttributesValue>**](VectorStoreFileAttributesValue.md) | Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. | |
+| **content** | [**kotlin.collections.List<VectorStoreSearchResultContentObject>**](VectorStoreSearchResultContentObject.md) | Content chunks from the file. | |
+
+
+
diff --git a/lib/docs/VectorStoreSearchResultsPage.md b/lib/docs/VectorStoreSearchResultsPage.md
new file mode 100644
index 00000000..75b0e369
--- /dev/null
+++ b/lib/docs/VectorStoreSearchResultsPage.md
@@ -0,0 +1,21 @@
+
+# VectorStoreSearchResultsPage
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **`object`** | [**inline**](#`Object`) | The object type, which is always `vector_store.search_results.page` | |
+| **searchQuery** | **kotlin.collections.List<kotlin.String>** | | |
+| **`data`** | [**kotlin.collections.List<VectorStoreSearchResultItem>**](VectorStoreSearchResultItem.md) | The list of search result items. | |
+| **hasMore** | **kotlin.Boolean** | Indicates if there are more results to fetch. | |
+| **nextPage** | **kotlin.String** | The token for the next page, if any. | |
+
+
+
+## Enum: object
+| Name | Value |
+| ---- | ----- |
+| `object` | vector_store.search_results.page |
+
+
+
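+## Example
+
+A sketch of iterating a page returned by `VectorStoresApi.searchVectorStore` (see VectorStoresApi.md); the camelCase property names are assumed to match the tables in this folder:
+
+```kotlin
+import com.openai.apis.VectorStoresApi
+import com.openai.models.VectorStoreSearchRequest
+
+fun printResults(vectorStoreId: String, request: VectorStoreSearchRequest) {
+    val page = VectorStoresApi().searchVectorStore(vectorStoreId, request)
+    page.data.forEach { item ->
+        println("${item.filename} (score=${item.score})")
+        item.content.forEach { chunk -> println(chunk.text) }
+    }
+    if (page.hasMore) println("More results available; next page token: ${page.nextPage}")
+}
+```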
diff --git a/lib/docs/VectorStoresApi.md b/lib/docs/VectorStoresApi.md
index efd9ed20..6055b754 100644
--- a/lib/docs/VectorStoresApi.md
+++ b/lib/docs/VectorStoresApi.md
@@ -17,6 +17,9 @@ All URIs are relative to *https://api.openai.com/v1*
| [**listVectorStoreFiles**](VectorStoresApi.md#listVectorStoreFiles) | **GET** /vector_stores/{vector_store_id}/files | Returns a list of vector store files. |
| [**listVectorStores**](VectorStoresApi.md#listVectorStores) | **GET** /vector_stores | Returns a list of vector stores. |
| [**modifyVectorStore**](VectorStoresApi.md#modifyVectorStore) | **POST** /vector_stores/{vector_store_id} | Modifies a vector store. |
+| [**retrieveVectorStoreFileContent**](VectorStoresApi.md#retrieveVectorStoreFileContent) | **GET** /vector_stores/{vector_store_id}/files/{file_id}/content | Retrieve the parsed contents of a vector store file. |
+| [**searchVectorStore**](VectorStoresApi.md#searchVectorStore) | **POST** /vector_stores/{vector_store_id}/search | Search a vector store for relevant chunks based on a query and file attributes filter. |
+| [**updateVectorStoreFileAttributes**](VectorStoresApi.md#updateVectorStoreFileAttributes) | **POST** /vector_stores/{vector_store_id}/files/{file_id} | Update attributes on a vector store file. |
@@ -651,6 +654,152 @@ try {
### Authorization
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+
+# **retrieveVectorStoreFileContent**
+> VectorStoreFileContentResponse retrieveVectorStoreFileContent(vectorStoreId, fileId)
+
+Retrieve the parsed contents of a vector store file.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = VectorStoresApi()
+val vectorStoreId : kotlin.String = "vs_abc123" // kotlin.String | The ID of the vector store.
+val fileId : kotlin.String = "file-abc123" // kotlin.String | The ID of the file within the vector store.
+try {
+ val result : VectorStoreFileContentResponse = apiInstance.retrieveVectorStoreFileContent(vectorStoreId, fileId)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling VectorStoresApi#retrieveVectorStoreFileContent")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling VectorStoresApi#retrieveVectorStoreFileContent")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **vectorStoreId** | **kotlin.String**| The ID of the vector store. | |
+| **fileId** | **kotlin.String**| The ID of the file within the vector store. | |
+
+### Return type
+
+[**VectorStoreFileContentResponse**](VectorStoreFileContentResponse.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+
+# **searchVectorStore**
+> VectorStoreSearchResultsPage searchVectorStore(vectorStoreId, vectorStoreSearchRequest)
+
+Search a vector store for relevant chunks based on a query and file attributes filter.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = VectorStoresApi()
+val vectorStoreId : kotlin.String = "vs_abc123" // kotlin.String | The ID of the vector store to search.
+val vectorStoreSearchRequest : VectorStoreSearchRequest = // VectorStoreSearchRequest |
+try {
+ val result : VectorStoreSearchResultsPage = apiInstance.searchVectorStore(vectorStoreId, vectorStoreSearchRequest)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling VectorStoresApi#searchVectorStore")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling VectorStoresApi#searchVectorStore")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **vectorStoreId** | **kotlin.String**| The ID of the vector store to search. | |
+| **vectorStoreSearchRequest** | [**VectorStoreSearchRequest**](VectorStoreSearchRequest.md)| | |
+
+### Return type
+
+[**VectorStoreSearchResultsPage**](VectorStoreSearchResultsPage.md)
+
+### Authorization
+
+
+Configure ApiKeyAuth:
+ ApiClient.accessToken = ""
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+
+# **updateVectorStoreFileAttributes**
+> VectorStoreFileObject updateVectorStoreFileAttributes(vectorStoreId, fileId, updateVectorStoreFileAttributesRequest)
+
+Update attributes on a vector store file.
+
+### Example
+```kotlin
+// Import classes:
+//import com.openai.infrastructure.*
+//import com.openai.models.*
+
+val apiInstance = VectorStoresApi()
+val vectorStoreId : kotlin.String = "vs_abc123" // kotlin.String | The ID of the vector store the file belongs to.
+val fileId : kotlin.String = "file-abc123" // kotlin.String | The ID of the file to update attributes.
+val updateVectorStoreFileAttributesRequest : UpdateVectorStoreFileAttributesRequest = // UpdateVectorStoreFileAttributesRequest |
+try {
+ val result : VectorStoreFileObject = apiInstance.updateVectorStoreFileAttributes(vectorStoreId, fileId, updateVectorStoreFileAttributesRequest)
+ println(result)
+} catch (e: ClientException) {
+ println("4xx response calling VectorStoresApi#updateVectorStoreFileAttributes")
+ e.printStackTrace()
+} catch (e: ServerException) {
+ println("5xx response calling VectorStoresApi#updateVectorStoreFileAttributes")
+ e.printStackTrace()
+}
+```
+
+### Parameters
+| Name | Type | Description | Notes |
+| ------------- | ------------- | ------------- | ------------- |
+| **vectorStoreId** | **kotlin.String**| The ID of the vector store the file belongs to. | |
+| **fileId** | **kotlin.String**| The ID of the file to update attributes. | |
+| **updateVectorStoreFileAttributesRequest** | [**UpdateVectorStoreFileAttributesRequest**](UpdateVectorStoreFileAttributesRequest.md)| | |
+
+### Return type
+
+[**VectorStoreFileObject**](VectorStoreFileObject.md)
+
+### Authorization
+
+
Configure ApiKeyAuth:
ApiClient.accessToken = ""
diff --git a/lib/docs/VoiceIdsShared.md b/lib/docs/VoiceIdsShared.md
new file mode 100644
index 00000000..e774f5a5
--- /dev/null
+++ b/lib/docs/VoiceIdsShared.md
@@ -0,0 +1,9 @@
+
+# VoiceIdsShared
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+
+
+
diff --git a/lib/docs/Wait.md b/lib/docs/Wait.md
new file mode 100644
index 00000000..2ee5e080
--- /dev/null
+++ b/lib/docs/Wait.md
@@ -0,0 +1,17 @@
+
+# Wait
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | Specifies the event type. For a wait action, this property is always set to `wait`. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | wait |
+
+
+
diff --git a/lib/docs/WebSearch.md b/lib/docs/WebSearch.md
new file mode 100644
index 00000000..5bc3fbb9
--- /dev/null
+++ b/lib/docs/WebSearch.md
@@ -0,0 +1,11 @@
+
+# WebSearch
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **userLocation** | [**WebSearchUserLocation**](WebSearchUserLocation.md) | | [optional] |
+| **searchContextSize** | [**WebSearchContextSize**](WebSearchContextSize.md) | | [optional] |
+
+
+
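+## Example
+
+A construction sketch, assuming the nested types documented in WebSearchUserLocation.md and WebSearchLocation.md expose their properties as constructor parameters and that the enum entry names mirror their values:
+
+```kotlin
+import com.openai.models.WebSearch
+import com.openai.models.WebSearchContextSize
+import com.openai.models.WebSearchLocation
+import com.openai.models.WebSearchUserLocation
+
+val webSearchOptions = WebSearch(
+    userLocation = WebSearchUserLocation(
+        type = WebSearchUserLocation.Type.approximate, // assumed enum entry name
+        approximate = WebSearchLocation(country = "US", city = "San Francisco"),
+    ),
+    searchContextSize = WebSearchContextSize.medium,
+)
+```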
diff --git a/lib/docs/WebSearchContextSize.md b/lib/docs/WebSearchContextSize.md
new file mode 100644
index 00000000..a8b522cf
--- /dev/null
+++ b/lib/docs/WebSearchContextSize.md
@@ -0,0 +1,14 @@
+
+# WebSearchContextSize
+
+## Enum
+
+
+ * `low` (value: `"low"`)
+
+ * `medium` (value: `"medium"`)
+
+ * `high` (value: `"high"`)
+
+
+
diff --git a/lib/docs/WebSearchLocation.md b/lib/docs/WebSearchLocation.md
new file mode 100644
index 00000000..37ab0d9d
--- /dev/null
+++ b/lib/docs/WebSearchLocation.md
@@ -0,0 +1,13 @@
+
+# WebSearchLocation
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **country** | **kotlin.String** | The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`. | [optional] |
+| **region** | **kotlin.String** | Free text input for the region of the user, e.g. `California`. | [optional] |
+| **city** | **kotlin.String** | Free text input for the city of the user, e.g. `San Francisco`. | [optional] |
+| **timezone** | **kotlin.String** | The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. `America/Los_Angeles`. | [optional] |
+
+
+
diff --git a/lib/docs/WebSearchTool.md b/lib/docs/WebSearchTool.md
new file mode 100644
index 00000000..c7478082
--- /dev/null
+++ b/lib/docs/WebSearchTool.md
@@ -0,0 +1,19 @@
+
+# WebSearchTool
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of the web search tool. One of: - `web_search_preview` - `web_search_preview_2025_03_11` | |
+| **userLocation** | [**WebSearchToolUserLocation**](WebSearchToolUserLocation.md) | | [optional] |
+| **searchContextSize** | [**WebSearchContextSize**](WebSearchContextSize.md) | | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | web_search_preview, web_search_preview_2025_03_11 |
+
+
+
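+## Example
+
+A construction sketch, assuming the `type` enum entry is named `web_search_preview` and that WebSearchToolUserLocation (see WebSearchToolUserLocation.md) exposes its properties as constructor parameters:
+
+```kotlin
+import com.openai.models.WebSearchContextSize
+import com.openai.models.WebSearchTool
+import com.openai.models.WebSearchToolUserLocation
+
+val tool = WebSearchTool(
+    type = WebSearchTool.Type.web_search_preview,
+    userLocation = WebSearchToolUserLocation(
+        type = WebSearchToolUserLocation.Type.approximate, // assumed enum entry name
+        city = "San Francisco",
+        country = "US",
+    ),
+    searchContextSize = WebSearchContextSize.high,
+)
+```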
diff --git a/lib/docs/WebSearchToolCall.md b/lib/docs/WebSearchToolCall.md
new file mode 100644
index 00000000..4e3c7a90
--- /dev/null
+++ b/lib/docs/WebSearchToolCall.md
@@ -0,0 +1,26 @@
+
+# WebSearchToolCall
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **id** | **kotlin.String** | The unique ID of the web search tool call. | |
+| **type** | [**inline**](#Type) | The type of the web search tool call. Always `web_search_call`. | |
+| **status** | [**inline**](#Status) | The status of the web search tool call. | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | web_search_call |
+
+
+
+## Enum: status
+| Name | Value |
+| ---- | ----- |
+| status | in_progress, searching, completed, failed |
+
+
+
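+## Example
+
+A sketch of inspecting a web search tool call returned in a response; the `Status` enum entry names are assumed to mirror the values above:
+
+```kotlin
+import com.openai.models.WebSearchToolCall
+
+fun describe(call: WebSearchToolCall): String =
+    when (call.status) {
+        WebSearchToolCall.Status.completed -> "web search ${call.id} finished"
+        WebSearchToolCall.Status.failed -> "web search ${call.id} failed"
+        else -> "web search ${call.id} is still ${call.status}"
+    }
+```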
diff --git a/lib/docs/WebSearchToolUserLocation.md b/lib/docs/WebSearchToolUserLocation.md
new file mode 100644
index 00000000..a96b2f5b
--- /dev/null
+++ b/lib/docs/WebSearchToolUserLocation.md
@@ -0,0 +1,21 @@
+
+# WebSearchToolUserLocation
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of location approximation. Always `approximate`. | |
+| **country** | **kotlin.String** | The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`. | [optional] |
+| **region** | **kotlin.String** | Free text input for the region of the user, e.g. `California`. | [optional] |
+| **city** | **kotlin.String** | Free text input for the city of the user, e.g. `San Francisco`. | [optional] |
+| **timezone** | **kotlin.String** | The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. `America/Los_Angeles`. | [optional] |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | approximate |
+
+
+
diff --git a/lib/docs/WebSearchUserLocation.md b/lib/docs/WebSearchUserLocation.md
new file mode 100644
index 00000000..b44a6f70
--- /dev/null
+++ b/lib/docs/WebSearchUserLocation.md
@@ -0,0 +1,18 @@
+
+# WebSearchUserLocation
+
+## Properties
+| Name | Type | Description | Notes |
+| ------------ | ------------- | ------------- | ------------- |
+| **type** | [**inline**](#Type) | The type of location approximation. Always `approximate`. | |
+| **approximate** | [**WebSearchLocation**](WebSearchLocation.md) | | |
+
+
+
+## Enum: type
+| Name | Value |
+| ---- | ----- |
+| type | approximate |
+
+
+
diff --git a/lib/src/main/kotlin/com/openai/apis/AssistantsApi.kt b/lib/src/main/kotlin/com/openai/apis/AssistantsApi.kt
index ad088b7a..0aeb9737 100644
--- a/lib/src/main/kotlin/com/openai/apis/AssistantsApi.kt
+++ b/lib/src/main/kotlin/com/openai/apis/AssistantsApi.kt
@@ -63,7 +63,8 @@ class AssistantsApi(
}
/**
- * Cancels a run that is `in_progress`.
+ * POST /threads/{thread_id}/runs/{run_id}/cancel Cancels a run that is
+ * `in_progress`.
*
* @param threadId The ID of the thread to which this run belongs.
* @param runId The ID of the run to cancel.
@@ -118,7 +119,8 @@ class AssistantsApi(
}
/**
- * Cancels a run that is `in_progress`.
+ * POST /threads/{thread_id}/runs/{run_id}/cancel Cancels a run that is
+ * `in_progress`.
*
* @param threadId The ID of the thread to which this run belongs.
* @param runId The ID of the run to cancel.
@@ -174,7 +176,7 @@ class AssistantsApi(
}
/**
- * Create an assistant with a model and instructions.
+ * POST /assistants Create an assistant with a model and instructions.
*
* @param createAssistantRequest
* @return AssistantObject
@@ -232,7 +234,7 @@ class AssistantsApi(
}
/**
- * Create an assistant with a model and instructions.
+ * POST /assistants Create an assistant with a model and instructions.
*
* @param createAssistantRequest
* @return ApiResponse
@@ -280,7 +282,7 @@ class AssistantsApi(
}
/**
- * Create a message.
+ * POST /threads/{thread_id}/messages Create a message.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) to
* create a message for.
@@ -342,7 +344,7 @@ class AssistantsApi(
}
/**
- * Create a message.
+ * POST /threads/{thread_id}/messages Create a message.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) to
* create a message for.
@@ -420,7 +422,7 @@ class AssistantsApi(
}
/**
- * Create a run.
+ * POST /threads/{thread_id}/runs Create a run.
*
* @param threadId The ID of the thread to run.
* @param createRunRequest
@@ -489,7 +491,7 @@ class AssistantsApi(
}
/**
- * Create a run.
+ * POST /threads/{thread_id}/runs Create a run.
*
* @param threadId The ID of the thread to run.
* @param createRunRequest
@@ -572,7 +574,7 @@ class AssistantsApi(
}
/**
- * Create a thread.
+ * POST /threads Create a thread.
*
* @param createThreadRequest (optional)
* @return ThreadObject
@@ -628,7 +630,7 @@ class AssistantsApi(
}
/**
- * Create a thread.
+ * POST /threads Create a thread.
*
* @param createThreadRequest (optional)
* @return ApiResponse
@@ -672,7 +674,7 @@ class AssistantsApi(
}
/**
- * Create a thread and run it in one request.
+ * POST /threads/runs Create a thread and run it in one request.
*
* @param createThreadAndRunRequest
* @return RunObject
@@ -730,7 +732,7 @@ class AssistantsApi(
}
/**
- * Create a thread and run it in one request.
+ * POST /threads/runs Create a thread and run it in one request.
*
* @param createThreadAndRunRequest
* @return ApiResponse
@@ -778,7 +780,7 @@ class AssistantsApi(
}
/**
- * Delete an assistant.
+ * DELETE /assistants/{assistant_id} Delete an assistant.
*
* @param assistantId The ID of the assistant to delete.
* @return DeleteAssistantResponse
@@ -832,7 +834,7 @@ class AssistantsApi(
}
/**
- * Delete an assistant.
+ * DELETE /assistants/{assistant_id} Delete an assistant.
*
* @param assistantId The ID of the assistant to delete.
* @return ApiResponse
@@ -880,7 +882,7 @@ class AssistantsApi(
}
/**
- * Deletes a message.
+ * DELETE /threads/{thread_id}/messages/{message_id} Deletes a message.
*
* @param threadId The ID of the thread to which this message belongs.
* @param messageId The ID of the message to delete.
@@ -941,7 +943,7 @@ class AssistantsApi(
}
/**
- * Deletes a message.
+ * DELETE /threads/{thread_id}/messages/{message_id} Deletes a message.
*
* @param threadId The ID of the thread to which this message belongs.
* @param messageId The ID of the message to delete.
@@ -1000,7 +1002,7 @@ class AssistantsApi(
}
/**
- * Delete a thread.
+ * DELETE /threads/{thread_id} Delete a thread.
*
* @param threadId The ID of the thread to delete.
* @return DeleteThreadResponse
@@ -1053,7 +1055,7 @@ class AssistantsApi(
}
/**
- * Delete a thread.
+ * DELETE /threads/{thread_id} Delete a thread.
*
* @param threadId The ID of the thread to delete.
* @return ApiResponse
@@ -1100,7 +1102,7 @@ class AssistantsApi(
}
/**
- * Retrieves an assistant.
+ * GET /assistants/{assistant_id} Retrieves an assistant.
*
* @param assistantId The ID of the assistant to retrieve.
* @return AssistantObject
@@ -1154,7 +1156,7 @@ class AssistantsApi(
}
/**
- * Retrieves an assistant.
+ * GET /assistants/{assistant_id} Retrieves an assistant.
*
* @param assistantId The ID of the assistant to retrieve.
* @return ApiResponse
@@ -1202,7 +1204,7 @@ class AssistantsApi(
}
/**
- * Retrieve a message.
+ * GET /threads/{thread_id}/messages/{message_id} Retrieve a message.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) to
* which this message belongs.
@@ -1261,7 +1263,7 @@ class AssistantsApi(
}
/**
- * Retrieve a message.
+ * GET /threads/{thread_id}/messages/{message_id} Retrieve a message.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) to
* which this message belongs.
@@ -1319,7 +1321,7 @@ class AssistantsApi(
}
/**
- * Retrieves a run.
+ * GET /threads/{thread_id}/runs/{run_id} Retrieves a run.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) that
* was run.
@@ -1375,7 +1377,7 @@ class AssistantsApi(
}
/**
- * Retrieves a run.
+ * GET /threads/{thread_id}/runs/{run_id} Retrieves a run.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) that
* was run.
@@ -1453,7 +1455,8 @@ class AssistantsApi(
}
/**
- * Retrieves a run step.
+ * GET /threads/{thread_id}/runs/{run_id}/steps/{step_id} Retrieves a run
+ * step.
*
* @param threadId The ID of the thread to which the run and run step
* belongs.
@@ -1526,7 +1529,8 @@ class AssistantsApi(
}
/**
- * Retrieves a run step.
+ * GET /threads/{thread_id}/runs/{run_id}/steps/{step_id} Retrieves a run
+ * step.
*
* @param threadId The ID of the thread to which the run and run step
* belongs.
@@ -1623,7 +1627,7 @@ class AssistantsApi(
}
/**
- * Retrieves a thread.
+ * GET /threads/{thread_id} Retrieves a thread.
*
* @param threadId The ID of the thread to retrieve.
* @return ThreadObject
@@ -1676,7 +1680,7 @@ class AssistantsApi(
}
/**
- * Retrieves a thread.
+ * GET /threads/{thread_id} Retrieves a thread.
*
* @param threadId The ID of the thread to retrieve.
* @return ApiResponse
@@ -1737,7 +1741,7 @@ class AssistantsApi(
}
/**
- * Returns a list of assistants.
+ * GET /assistants Returns a list of assistants.
*
* @param limit A limit on the number of objects to be returned. Limit can
* range between 1 and 100, and the default is 20. (optional, default
@@ -1816,7 +1820,7 @@ class AssistantsApi(
}
/**
- * Returns a list of assistants.
+ * GET /assistants Returns a list of assistants.
*
* @param limit A limit on the number of objects to be returned. Limit can
* range between 1 and 100, and the default is 20. (optional, default
@@ -1934,7 +1938,8 @@ class AssistantsApi(
}
/**
- * Returns a list of messages for a given thread.
+ * GET /threads/{thread_id}/messages Returns a list of messages for a given
+ * thread.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) the
* messages belong to.
@@ -2021,7 +2026,8 @@ class AssistantsApi(
}
/**
- * Returns a list of messages for a given thread.
+ * GET /threads/{thread_id}/messages Returns a list of messages for a given
+ * thread.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) the
* messages belong to.
@@ -2181,7 +2187,8 @@ class AssistantsApi(
}
/**
- * Returns a list of run steps belonging to a run.
+ * GET /threads/{thread_id}/runs/{run_id}/steps Returns a list of run steps
+ * belonging to a run.
*
* @param threadId The ID of the thread the run and run steps belong to.
* @param runId The ID of the run the run steps belong to.
@@ -2274,7 +2281,8 @@ class AssistantsApi(
}
/**
- * Returns a list of run steps belonging to a run.
+ * GET /threads/{thread_id}/runs/{run_id}/steps Returns a list of run steps
+ * belonging to a run.
*
* @param threadId The ID of the thread the run and run steps belong to.
* @param runId The ID of the run the run steps belong to.
@@ -2432,7 +2440,8 @@ class AssistantsApi(
}
/**
- * Returns a list of runs belonging to a thread.
+ * GET /threads/{thread_id}/runs Returns a list of runs belonging to a
+ * thread.
*
* @param threadId The ID of the thread the run belongs to.
* @param limit A limit on the number of objects to be returned. Limit can
@@ -2514,7 +2523,8 @@ class AssistantsApi(
}
/**
- * Returns a list of runs belonging to a thread.
+ * GET /threads/{thread_id}/runs Returns a list of runs belonging to a
+ * thread.
*
* @param threadId The ID of the thread the run belongs to.
* @param limit A limit on the number of objects to be returned. Limit can
@@ -2626,7 +2636,7 @@ class AssistantsApi(
}
/**
- * Modifies an assistant.
+ * POST /assistants/{assistant_id} Modifies an assistant.
*
* @param assistantId The ID of the assistant to modify.
* @param modifyAssistantRequest
@@ -2687,7 +2697,7 @@ class AssistantsApi(
}
/**
- * Modifies an assistant.
+ * POST /assistants/{assistant_id} Modifies an assistant.
*
* @param assistantId The ID of the assistant to modify.
* @param modifyAssistantRequest
@@ -2745,7 +2755,7 @@ class AssistantsApi(
}
/**
- * Modifies a message.
+ * POST /threads/{thread_id}/messages/{message_id} Modifies a message.
*
* @param threadId The ID of the thread to which this message belongs.
* @param messageId The ID of the message to modify.
@@ -2809,7 +2819,7 @@ class AssistantsApi(
}
/**
- * Modifies a message.
+ * POST /threads/{thread_id}/messages/{message_id} Modifies a message.
*
* @param threadId The ID of the thread to which this message belongs.
* @param messageId The ID of the message to modify.
@@ -2874,7 +2884,7 @@ class AssistantsApi(
}
/**
- * Modifies a run.
+ * POST /threads/{thread_id}/runs/{run_id} Modifies a run.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) that
* was run.
@@ -2939,7 +2949,7 @@ class AssistantsApi(
}
/**
- * Modifies a run.
+ * POST /threads/{thread_id}/runs/{run_id} Modifies a run.
*
* @param threadId The ID of the [thread](/docs/api-reference/threads) that
* was run.
@@ -3006,7 +3016,7 @@ class AssistantsApi(
}
/**
- * Modifies a thread.
+ * POST /threads/{thread_id} Modifies a thread.
*
* @param threadId The ID of the thread to modify. Only the
* `metadata` can be modified.
@@ -3068,7 +3078,7 @@ class AssistantsApi(
}
/**
- * Modifies a thread.
+ * POST /threads/{thread_id} Modifies a thread.
*
* @param threadId The ID of the thread to modify. Only the
* `metadata` can be modified.
@@ -3126,7 +3136,8 @@ class AssistantsApi(
}
/**
- * When a run has the `status: \"requires_action\"` and
+ * POST /threads/{thread_id}/runs/{run_id}/submit_tool_outputs When a run
+ * has the `status: \"requires_action\"` and
* `required_action.type` is `submit_tool_outputs`, this
* endpoint can be used to submit the outputs from the tool calls once
* they're all completed. All outputs must be submitted in a single
@@ -3195,7 +3206,8 @@ class AssistantsApi(
}
/**
- * When a run has the `status: \"requires_action\"` and
+ * POST /threads/{thread_id}/runs/{run_id}/submit_tool_outputs When a run
+ * has the `status: \"requires_action\"` and
* `required_action.type` is `submit_tool_outputs`, this
* endpoint can be used to submit the outputs from the tool calls once
* they're all completed. All outputs must be submitted in a single
diff --git a/lib/src/main/kotlin/com/openai/apis/AudioApi.kt b/lib/src/main/kotlin/com/openai/apis/AudioApi.kt
index 26f3b7da..c2c9644c 100644
--- a/lib/src/main/kotlin/com/openai/apis/AudioApi.kt
+++ b/lib/src/main/kotlin/com/openai/apis/AudioApi.kt
@@ -28,6 +28,8 @@ import com.openai.models.CreateSpeechRequest
import com.openai.models.CreateTranscription200Response
import com.openai.models.CreateTranscriptionRequestModel
import com.openai.models.CreateTranslation200Response
+import com.openai.models.CreateTranslationRequestModel
+import com.openai.models.TranscriptionInclude
import com.squareup.moshi.Json
import java.io.IOException
import okhttp3.Call
@@ -46,7 +48,7 @@ class AudioApi(
}
/**
- * Generates audio from the input text.
+ * POST /audio/speech Generates audio from the input text.
*
* @param createSpeechRequest
* @return java.io.File
@@ -100,7 +102,7 @@ class AudioApi(
}
/**
- * Generates audio from the input text.
+ * POST /audio/speech Generates audio from the input text.
*
* @param createSpeechRequest
* @return ApiResponse
@@ -162,7 +164,7 @@ class AudioApi(
}
/**
- * Transcribes audio into the input language.
+ * POST /audio/transcriptions Transcribes audio into the input language.
*
* @param file The audio file object (not file name) to transcribe, in one
* of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
@@ -184,6 +186,13 @@ class AudioApi(
* [log probability](https://en.wikipedia.org/wiki/Log_probability) to
* automatically increase the temperature until certain thresholds are
* hit. (optional, default to 0)
+ * @param include Additional information to include in the transcription
+ * response. `logprobs` will return the log probabilities of the
+ * tokens in the response to understand the model's confidence in the
+ * transcription. `logprobs` only works with response_format set
+ * to `json` and only with the models
+ * `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`.
+ * (optional)
* @param timestampGranularities The timestamp granularities to populate for
* this transcription. `response_format` must be set
* `verbose_json` to use timestamp granularities. Either or both
@@ -191,6 +200,14 @@ class AudioApi(
* `segment`. Note: There is no additional latency for segment
* timestamps, but generating word timestamps incurs additional latency.
* (optional)
+ * @param stream If set to true, the model response data will be streamed to
+ * the client as it is generated using
+ * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ * See the
+ * [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+ * for more information. Note: Streaming is not supported for the
+ * `whisper-1` model and will be ignored. (optional, default to
+ * false)
* @return CreateTranscription200Response
* @throws IllegalStateException If the request is not correctly configured
* @throws IOException Rethrows the OkHttp execute method exception
@@ -212,13 +229,17 @@ class AudioApi(
model: CreateTranscriptionRequestModel,
language: kotlin.String? = null,
prompt: kotlin.String? = null,
+ // openai-openapi-kotlin change begin
responseFormat: AudioResponseFormat? = AudioResponseFormat.json,
+ // openai-openapi-kotlin change end
temperature: java.math.BigDecimal? = java.math.BigDecimal("0"),
+        include: kotlin.collections.List<TranscriptionInclude>? = null,
timestampGranularities:
kotlin.collections.List<
TimestampGranularitiesCreateTranscription
>? =
null,
+ stream: kotlin.Boolean? = false,
): CreateTranscription200Response {
val localVarResponse =
createTranscriptionWithHttpInfo(
@@ -228,7 +249,9 @@ class AudioApi(
prompt = prompt,
responseFormat = responseFormat,
temperature = temperature,
+ include = include,
timestampGranularities = timestampGranularities,
+ stream = stream,
)
return when (localVarResponse.responseType) {
@@ -263,7 +286,7 @@ class AudioApi(
}
/**
- * Transcribes audio into the input language.
+ * POST /audio/transcriptions Transcribes audio into the input language.
*
* @param file The audio file object (not file name) to transcribe, in one
* of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
@@ -285,6 +308,13 @@ class AudioApi(
* [log probability](https://en.wikipedia.org/wiki/Log_probability) to
* automatically increase the temperature until certain thresholds are
* hit. (optional, default to 0)
+ * @param include Additional information to include in the transcription
+ * response. `logprobs` will return the log probabilities of the
+ * tokens in the response to understand the model's confidence in the
+ * transcription. `logprobs` only works with response_format set
+ * to `json` and only with the models
+ * `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`.
+ * (optional)
* @param timestampGranularities The timestamp granularities to populate for
* this transcription. `response_format` must be set
* `verbose_json` to use timestamp granularities. Either or both
@@ -292,6 +322,14 @@ class AudioApi(
* `segment`. Note: There is no additional latency for segment
* timestamps, but generating word timestamps incurs additional latency.
* (optional)
+ * @param stream If set to true, the model response data will be streamed to
+ * the client as it is generated using
+ * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ * See the
+ * [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+ * for more information. Note: Streaming is not supported for the
+ * `whisper-1` model and will be ignored. (optional, default to
+ * false)
* @return ApiResponse
* @throws IllegalStateException If the request is not correctly configured
* @throws IOException Rethrows the OkHttp execute method exception
@@ -305,8 +343,10 @@ class AudioApi(
prompt: kotlin.String?,
responseFormat: AudioResponseFormat?,
temperature: java.math.BigDecimal?,
+        include: kotlin.collections.List<TranscriptionInclude>?,
timestampGranularities:
kotlin.collections.List<TimestampGranularitiesCreateTranscription>?,
+ stream: kotlin.Boolean?,
): ApiResponse<CreateTranscription200Response?> {
val localVariableConfig =
createTranscriptionRequestConfig(
@@ -316,7 +356,9 @@ class AudioApi(
prompt = prompt,
responseFormat = responseFormat,
temperature = temperature,
+ include = include,
timestampGranularities = timestampGranularities,
+ stream = stream,
)
return request<
@@ -350,6 +392,13 @@ class AudioApi(
* [log probability](https://en.wikipedia.org/wiki/Log_probability) to
* automatically increase the temperature until certain thresholds are
* hit. (optional, default to 0)
+ * @param include Additional information to include in the transcription
+ * response. `logprobs` will return the log probabilities of the
+ * tokens in the response to understand the model's confidence in the
+ * transcription. `logprobs` only works with response_format set
+ * to `json` and only with the models
+ * `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`.
+ * (optional)
* @param timestampGranularities The timestamp granularities to populate for
* this transcription. `response_format` must be set
* `verbose_json` to use timestamp granularities. Either or both
@@ -357,6 +406,14 @@ class AudioApi(
* `segment`. Note: There is no additional latency for segment
* timestamps, but generating word timestamps incurs additional latency.
* (optional)
+ * @param stream If set to true, the model response data will be streamed to
+ * the client as it is generated using
+ * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ * See the
+ * [Streaming section of the Speech-to-Text guide](/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+ * for more information. Note: Streaming is not supported for the
+ * `whisper-1` model and will be ignored. (optional, default to
+ * false)
* @return RequestConfig
*/
fun createTranscriptionRequestConfig(
@@ -366,8 +423,10 @@ class AudioApi(
prompt: kotlin.String?,
responseFormat: AudioResponseFormat?,
temperature: java.math.BigDecimal?,
+        include: kotlin.collections.List<TranscriptionInclude>?,
timestampGranularities:
kotlin.collections.List<TimestampGranularitiesCreateTranscription>?,
+ stream: kotlin.Boolean?,
): RequestConfig