/* Options:
Date: 2025-11-03 11:38:27
Version: 8.91
Tip: To override a DTO option, remove "//" prefix before updating
BaseUrl: https://localhost:5001

//GlobalNamespace: 
//MakePropertiesOptional: False
//AddServiceStackTypes: True
//AddResponseStatus: False
//AddImplicitVersion: 
//AddDescriptionAsComments: True
IncludeTypes: ChatCompletion.*
//ExcludeTypes: 
//DefaultImports: serde::{Serialize, Deserialize},std::collections::HashMap
*/

use serde::{Serialize, Deserialize};
use std::collections::HashMap;


/// ServiceStack marker type: tags a request DTO with the response type `T` it returns.
/// Carries no runtime data; `PhantomData` records the generic parameter without storing a value.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct IReturn<T> {
    // Never serialized; exists only so `T` is used by the struct definition.
    #[serde(skip)]
    pub _phantom: std::marker::PhantomData<T>,
}

/// ServiceStack marker type indicating the request DTO is sent via HTTP POST.
/// Intentionally empty — it conveys routing intent only.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct IPost {
}

/// A single content part of a chat message; `type` discriminates the concrete kind.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiContent {
    /// The type of the content part.
    pub r#type: String,
}

/// A single tool call generated by the model, such as a function call.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ToolCall {
    /// The ID of the tool call.
    pub id: String,
    /// The type of the tool. Currently, only `function` is supported.
    pub r#type: String,
    /// The function that the model called.
    // NOTE(review): serialized here as a plain string rather than a structured
    // {name, arguments} object — confirm against the server DTO before relying on it.
    pub function: String,
}

/// A single message in the conversation so far.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiMessage {
    /// The contents of the message.
    pub content: Option<Vec<AiContent>>,
    /// The role of the author of this message. Valid values are `system`, `user`, `assistant` and `tool`.
    pub role: String,
    /// An optional name for the participant. Provides the model information to differentiate between participants of the same role.
    pub name: Option<String>,
    /// The tool calls generated by the model, such as function calls.
    pub tool_calls: Option<Vec<ToolCall>>,
    /// Tool call that this message is responding to (set when `role` is `tool`).
    pub tool_call_id: Option<String>,
}

/// Parameters for audio output. Required when audio output is requested with modalities: [audio]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiChatAudio {
    /// Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16.
    pub format: String,
    /// The voice the model uses to respond. Supported voices are alloy, ash, ballad, coral, echo, fable, nova, onyx, sage, and shimmer.
    pub voice: String,
}

/// Output format constraint for model responses.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub enum ResponseFormat {
    /// Plain text output (the default).
    #[serde(rename = "text")]
    Text,
    /// JSON mode: the model is constrained to emit valid JSON.
    #[serde(rename = "json_object")]
    JsonObject,
}

/// Wrapper object specifying the format that the model must output.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiResponseFormat {
    /// An object specifying the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.
    // NOTE(review): this rename makes the field serialize as "response_format", so the
    // request carries {"response_format": {"response_format": "..."}} — the raw OpenAI
    // API expects the inner key to be "type". This mirrors the generated server DTO;
    // confirm the server contract before changing it.
    #[serde(rename = "response_format")]
    pub r#type: ResponseFormat,
}

/// Kind of tool the model may call.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub enum ToolType {
    /// A callable function; currently the only supported tool type.
    #[serde(rename = "function")]
    Function,
}

/// A tool the model may call during the completion.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Tool {
    /// The type of the tool. Currently, only function is supported.
    pub r#type: ToolType,
}

/// A single field-level error inside a `ResponseStatus`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ResponseError {
    /// Machine-readable error code for this field error.
    #[serde(rename = "errorCode")]
    pub error_code: String,
    /// Name of the request field the error relates to.
    #[serde(rename = "fieldName")]
    pub field_name: String,
    /// Human-readable error message.
    pub message: String,
    /// Optional additional error metadata.
    pub meta: Option<HashMap<String, String>>,
}

/// ServiceStack structured error response attached to failed service calls.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ResponseStatus {
    /// Machine-readable error code for the overall failure.
    #[serde(rename = "errorCode")]
    pub error_code: String,
    /// Human-readable error message.
    pub message: Option<String>,
    /// Server stack trace, when the server is configured to include it.
    #[serde(rename = "stackTrace")]
    pub stack_trace: Option<String>,
    /// Per-field validation errors, when applicable.
    pub errors: Option<Vec<ResponseError>>,
    /// Optional additional error metadata.
    pub meta: Option<HashMap<String, String>>,
}

/// A URL citation within a message, as produced when using the web search tool.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct UrlCitation {
    /// The index of the last character of the URL citation in the message.
    pub end_index: i32,
    /// The index of the first character of the URL citation in the message.
    pub start_index: i32,
    /// The title of the web resource.
    pub title: String,
    /// The URL of the web resource.
    pub url: String,
}

/// An annotation attached to a message, as when using the web search tool.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ChoiceAnnotation {
    /// The type of the URL citation. Always url_citation.
    pub r#type: String,
    /// A URL citation when using web search.
    pub url_citation: UrlCitation,
}

/// If the audio output modality is requested, this object contains data about the audio response from the model.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ChoiceAudio {
    /// Base64 encoded audio bytes generated by the model, in the format specified in the request.
    pub data: String,
    /// The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.
    pub expires_at: i32,
    /// Unique identifier for this audio response.
    pub id: String,
    /// Transcript of the audio generated by the model.
    pub transcript: String,
}

/// A chat completion message generated by the model.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ChoiceMessage {
    /// The contents of the message.
    pub content: String,
    /// The refusal message generated by the model.
    pub refusal: Option<String>,
    /// The reasoning process used by the model.
    pub reasoning: Option<String>,
    /// The role of the author of this message.
    pub role: String,
    /// Annotations for the message, when applicable, as when using the web search tool.
    pub annotations: Option<Vec<ChoiceAnnotation>>,
    /// If the audio output modality is requested, this object contains data about the audio response from the model.
    pub audio: Option<ChoiceAudio>,
    /// The tool calls generated by the model, such as function calls.
    pub tool_calls: Option<Vec<ToolCall>>,
}

/// One completion choice returned by the model (more than one when `n` > 1).
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Choice {
    /// The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool
    pub finish_reason: String,
    /// The index of the choice in the list of choices.
    pub index: i32,
    /// A chat completion message generated by the model.
    pub message: ChoiceMessage,
}

/// Breakdown of tokens used in a completion.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiCompletionUsage {
    /// When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion.
    pub accepted_prediction_tokens: i32,
    /// Audio input tokens generated by the model.
    pub audio_tokens: i32,
    /// Tokens generated by the model for reasoning.
    pub reasoning_tokens: i32,
    /// When using Predicted Outputs, the number of tokens in the prediction that did not appear in the completion.
    pub rejected_prediction_tokens: i32,
}

/// Breakdown of tokens used in the prompt.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiPromptUsage {
    /// When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion.
    pub accepted_prediction_tokens: i32,
    /// Audio input tokens present in the prompt.
    pub audio_tokens: i32,
    /// Cached tokens present in the prompt.
    pub cached_tokens: i32,
}

/// Usage statistics for the completion request.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiUsage {
    /// Number of tokens in the generated completion.
    pub completion_tokens: i32,
    /// Number of tokens in the prompt.
    pub prompt_tokens: i32,
    /// Total number of tokens used in the request (prompt + completion).
    pub total_tokens: i32,
    /// Breakdown of tokens used in a completion.
    pub completion_tokens_details: Option<AiCompletionUsage>,
    /// Breakdown of tokens used in the prompt.
    pub prompt_tokens_details: Option<AiPromptUsage>,
}

/// Text content part of a message.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiTextContent {
    /// The text content.
    pub text: String,
}

/// Reference to an image, by URL or inline base64 data.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiImageUrl {
    /// Either a URL of the image or the base64 encoded image data.
    pub url: String,
}

/// Image content part of a message.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiImageContent {
    /// The image for this content.
    pub image_url: AiImageUrl,
}

/// Encoded audio payload for an audio content part.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiInputAudio {
    /// URL or Base64 encoded audio data.
    pub data: String,
    /// The format of the encoded audio data. Currently supports 'wav' and 'mp3'.
    pub format: String,
}

/// Audio content part of a message.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiAudioContent {
    /// The audio input for this content.
    pub input_audio: AiInputAudio,
}

/// File payload for a file content part.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiFile {
    /// The URL or base64 encoded file data, used when passing the file to the model as a string.
    pub file_data: String,
    /// The name of the file, used when passing the file to the model as a string.
    pub filename: String,
    /// The ID of an uploaded file to use as input.
    pub file_id: Option<String>,
}

/// File content part of a message.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct AiFileContent {
    /// The file input for this content.
    pub file: AiFile,
}

/// Response DTO returned by the Chat Completions API.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ChatResponse {
    /// A unique identifier for the chat completion.
    pub id: String,
    /// A list of chat completion choices. Can be more than one if n is greater than 1.
    pub choices: Vec<Choice>,
    /// The Unix timestamp (in seconds) of when the chat completion was created.
    pub created: i64,
    /// The model used for the chat completion.
    pub model: String,
    /// This fingerprint represents the backend configuration that the model runs with.
    pub system_fingerprint: Option<String>,
    /// The object type, which is always chat.completion.
    pub object: String,
    /// Specifies the processing type used for serving the request.
    pub service_tier: Option<String>,
    /// Usage statistics for the completion request.
    pub usage: AiUsage,
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format.
    pub metadata: Option<HashMap<String, String>>,
    /// ServiceStack structured error details, populated when the call failed.
    #[serde(rename = "responseStatus")]
    pub response_status: Option<ResponseStatus>,
}

/// Chat Completions API (OpenAI-Compatible)
// Route("/v1/chat/completions", "POST")
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct ChatCompletion {
    /// The messages to generate chat completions for.
    pub messages: Vec<AiMessage>,
    /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API
    pub model: String,
    /// Parameters for audio output. Required when audio output is requested with modalities: [audio]
    pub audio: Option<AiChatAudio>,
    /// Modify the likelihood of specified tokens appearing in the completion.
    pub logit_bias: Option<HashMap<i32, i32>>,
    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format.
    pub metadata: Option<HashMap<String, String>>,
    /// Constrains effort on reasoning for reasoning models. Currently supported values are minimal, low, medium, and high (none, default). Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
    pub reasoning_effort: Option<String>,
    /// An object specifying the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting Type to ResponseFormat.JsonObject enables JSON mode, which guarantees the message the model generates is valid JSON.
    pub response_format: Option<AiResponseFormat>,
    /// Specifies the processing type used for serving the request.
    pub service_tier: Option<String>,
    /// A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies each user.
    pub safety_identifier: Option<String>,
    /// Up to 4 sequences where the API will stop generating further tokens.
    pub stop: Option<Vec<String>>,
    /// Output types that you would like the model to generate. Most models are capable of generating text, which is the default:
    pub modalities: Option<Vec<String>>,
    /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates.
    pub prompt_cache_key: Option<String>,
    /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
    pub tools: Option<Vec<Tool>>,
    /// Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses. Currently supported values are low, medium, and high.
    pub verbosity: Option<String>,
    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    pub temperature: Option<f64>,
    /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
    pub max_completion_tokens: Option<i32>,
    /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
    pub top_logprobs: Option<i32>,
    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    pub top_p: Option<f64>,
    /// Number between `-2.0` and `2.0`. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
    pub frequency_penalty: Option<f64>,
    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
    pub presence_penalty: Option<f64>,
    /// This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
    pub seed: Option<i32>,
    /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
    pub n: Option<i32>,
    /// Whether or not to store the output of this chat completion request for use in our model distillation or evals products.
    pub store: Option<bool>,
    /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.
    pub logprobs: Option<bool>,
    /// Whether to enable parallel function calling during tool use.
    pub parallel_tool_calls: Option<bool>,
    /// Whether to enable thinking mode for some Qwen models and providers.
    pub enable_thinking: Option<bool>,
    /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a `data: [DONE]` message.
    pub stream: Option<bool>,
}
