diff --git a/async-openai/src/types/responses/response.rs b/async-openai/src/types/responses/response.rs
index d90ab66e..64eb420f 100644
--- a/async-openai/src/types/responses/response.rs
+++ b/async-openai/src/types/responses/response.rs
@@ -1440,6 +1440,7 @@ pub struct ResponseLogProb {
 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
 pub struct OutputTextContent {
     /// The annotations of the text output.
+    #[serde(default)]
     pub annotations: Vec<Annotation>,
     pub logprobs: Option<Vec<ResponseLogProb>>,
     /// The text output from the model.
diff --git a/async-openai/src/types/shared/response_usage.rs b/async-openai/src/types/shared/response_usage.rs
index 92fc99da..09cd7bb7 100644
--- a/async-openai/src/types/shared/response_usage.rs
+++ b/async-openai/src/types/shared/response_usage.rs
@@ -1,20 +1,21 @@
 use serde::{Deserialize, Serialize};
 
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
 pub struct InputTokenDetails {
     /// The number of tokens that were retrieved from the cache.
     /// [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
     pub cached_tokens: u32,
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
 pub struct OutputTokenDetails {
     /// The number of reasoning tokens.
     pub reasoning_tokens: u32,
 }
 
 /// Usage statistics for a response.
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
+#[serde(default)]
 pub struct ResponseUsage {
     /// The number of input tokens.
     pub input_tokens: u32,
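
For reference, a minimal sketch of what the container-level `#[serde(default)]` plus the new `Default` derives buy on `ResponseUsage`: a payload that omits some usage fields now deserializes to zeroed defaults instead of failing with a missing-field error. The struct below is a simplified stand-in carrying only a few of the fields from the diff (not the crate's actual definition) and assumes `serde` and `serde_json` as dependencies.

```rust
use serde::{Deserialize, Serialize};

// Simplified stand-in mirroring the patched ResponseUsage; field set is reduced
// for illustration and does not match the crate's full struct.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(default)]
pub struct ResponseUsage {
    pub input_tokens: u32,
    pub output_tokens: u32,
    pub total_tokens: u32,
}

fn main() {
    // With #[serde(default)] on the container, fields missing from the JSON are
    // filled from Default::default() (0 for u32) instead of producing
    // a "missing field" deserialization error.
    let usage: ResponseUsage = serde_json::from_str(r#"{"input_tokens": 12}"#).unwrap();
    assert_eq!(usage.input_tokens, 12);
    assert_eq!(usage.output_tokens, 0);
    assert_eq!(usage.total_tokens, 0);
    println!("{usage:?}");
}
```

The field-level `#[serde(default)]` on `OutputTextContent::annotations` works the same way: a response that omits `annotations` deserializes to an empty `Vec` rather than erroring.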