/*---------------------------------------------------------------------------------------------
 *  Copyright (c) Microsoft Corporation. All rights reserved.
 *  Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

/**
 * Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).
 *
 * Based (updated 31.10.2024) on https://platform.openai.com/docs/api-reference/completions/object
 * 		(! except `Choice#finish_reason` can be null during streaming)
 */
export interface Completion {
	/** A unique identifier for the completion. */
	id: string;
	/** The list of completion choices the model generated for the input prompt. */
	choices: Completion.Choice[];
	/** The Unix timestamp (in seconds) of when the completion was created. */
	created: number;
	/** The model used for completion. */
	model: string;
	/** This fingerprint represents the backend configuration that the model runs with. */
	system_fingerprint: string;
	/** The object type, which is always "text_completion" (narrowed to the literal per the API contract). */
	object: 'text_completion';
	/** Usage statistics for the completion request; `undefined` when the response does not include them. */
	usage: Completion.Usage | undefined;
}

export namespace Completion {
	export interface Choice {
		/** The index of this choice within the response's `choices` list. */
		index: number;
		/** The reason the model stopped generating tokens; `null` on streamed chunks until generation for this choice has finished. */
		finish_reason: FinishReason | null;
		/** The log probabilities of the tokens, or `null` when logprobs were not requested. */
		logprobs: LogProbs | null;
		/** The generated text. */
		text: string | undefined;
	}

	/**
	 * The reason the model stopped generating tokens.
	 */
	export enum FinishReason {
		/** If the model hit a natural stop point or a provided stop sequence. */
		Stop = 'stop',
		/** If the maximum number of tokens specified in the request was reached. */
		Length = 'length',
		/** If content was omitted due to a flag from our content filters. */
		ContentFilter = 'content_filter',
	}

	export type LogProbs = {
		/** The list of tokens generated by the model. */
		tokens: string[];
		/** The log probabilities of the tokens (parallel to `tokens`). */
		token_logprobs: number[];
		/** The text offsets of the tokens (parallel to `tokens`). */
		text_offset: number[];
		/** The top log probabilities of the tokens: one token→logprob map per generated token. */
		top_logprobs: Record<string, number>[];
	};

	export interface Usage {
		/** Number of tokens in the generated completion. */
		completion_tokens: number;
		/** Number of tokens in the prompt. */
		prompt_tokens: number;
		/** Total number of tokens used in the request (prompt + completion). */
		total_tokens: number;
		/** Breakdown of tokens used in a completion. */
		completion_tokens_details: TokensDetails;
		/** Breakdown of tokens used in the prompt. */
		prompt_tokens_details: TokensDetails;
	}

	/** Shared shape for both `completion_tokens_details` and `prompt_tokens_details`. */
	export interface TokensDetails {
		/** Audio input tokens present in the prompt. */
		audio_tokens: number;
		/** Tokens generated by the model for reasoning. */
		reasoning_tokens: number;
		/** Tokens read from the prompt cache; reported by the API on `prompt_tokens_details` only, hence optional. */
		cached_tokens?: number;
	}
}
