import { Expose, Type } from 'class-transformer';
import { ChatCompletionFinishReason, ChatCompletionMessage } from './message';
import { Usage } from './usage';

/**
 * The most likely tokens and their log probability, at this token position.
 */
/**
 * One of the most likely tokens, with its log probability, at a given
 * token position.
 */
export class TopLogProbs {
    /**
     * The token.
     */
    @Expose()
    token: string;
    /**
     * The log probability of the token.
     */
    @Expose()
    logprob: number;
    /**
     * A list of integers representing the UTF-8 bytes
     * representation of the token. Useful in instances where characters are represented by multiple
     * tokens and their byte representations must be combined to generate the correct
     * text representation. Null if there is no bytes representation for the
     * token.
     */
    // The declared type now matches the documented contract: the wire value
    // may be null, so strict-null-checked callers must handle that case.
    @Expose({ name: 'bytes' })
    probBytes: number[] | null;
}

/**
 * Message content tokens with log probability information.
 */
export class Content {
    /**
     * The token.
     */
    @Expose()
    token: string;
    /**
     * The log probability of the token.
     */
    @Expose()
    logprob: number;
    /**
     * A list of integers representing the UTF-8 bytes representation
     * of the token. Useful in instances where characters are represented by multiple
     * tokens and their byte representations must be combined to generate the correct
     * text representation. Can be null if there is no bytes representation for the
     * token.
     */
    @Expose({ name: 'bytes' })
    probBytes: number[];
    /**
     * List of the most likely tokens and their log probability, at
     * this token position. In rare cases, there may be fewer than the number of
     * requested top_logprobs returned.
     */
    @Expose({ name: 'top_logprobs' })
    topLogprobs: TopLogProbs[];
}

/**
 * Log probability information for the choice.
 */
/**
 * Log probability information attached to a single choice.
 */
export class LogProbs {
    /** Message content tokens with log probability information. */
    @Expose()
    @Type(() => Content)
    content: Content[];
    /** Message refusal tokens with log probability information. */
    @Expose()
    @Type(() => Content)
    refusal: Content[];
}

/**
 * Chat completion choice.
 */
/**
 * Chat completion choice.
 */
export class Choice {
    /**
     * The reason the model stopped generating tokens.
     */
    @Expose({ name: 'finish_reason' })
    finishReason: ChatCompletionFinishReason;
    /**
     * The index of the choice in the list of choices.
     */
    @Expose()
    index: number;
    /**
     * A chat completion message generated by the model.
     */
    // @Expose() was missing here; without it the message field is dropped
    // when transforming with excludeExtraneousValues (every other decorated
    // field in this file, including ChunkChoice.delta, carries @Expose()).
    @Type(() => ChatCompletionMessage)
    @Expose()
    message: ChatCompletionMessage;
    /**
     * Log probability information for the choice.
     */
    @Type(() => LogProbs)
    @Expose()
    logprobs: LogProbs;
}

/**
 * Represents a chat completion response returned by model, based on the provided
 * input.
 */
/**
 * A chat completion response returned by the model for the provided input.
 */
export class ChatCompletion {
    /** A unique identifier for the chat completion. */
    @Expose()
    id: string;
    /**
     * The list of completion choices; contains more than one entry when the
     * request asked for n > 1 completions.
     */
    @Expose()
    @Type(() => Choice)
    choices: Choice[];
    /** Unix timestamp (in seconds) of when the chat completion was created. */
    @Expose()
    created: number;
    /** The model used for the chat completion. */
    @Expose()
    model: string;
    /**
     * The service tier used for processing the request. Only present when the
     * service_tier parameter was specified in the request.
     */
    @Expose({ name: 'service_tier' })
    serviceTier: string;
    /**
     * Fingerprint of the backend configuration the model ran with. Combine
     * with the seed request parameter to detect backend changes that might
     * impact determinism.
     */
    @Expose({ name: 'system_fingerprint' })
    systemFingerprint: string;
    /** The object type, which is always chat.completion. */
    @Expose()
    object: string;
    /** Usage statistics for the completion request. */
    @Expose()
    @Type(() => Usage)
    usage: Usage;
}

/**
 * Chat completion choice.
 */
/**
 * A single choice within a streamed chat completion chunk.
 */
export class ChunkChoice {
    /** The reason the model stopped generating tokens. */
    @Expose({ name: 'finish_reason' })
    finishReason: ChatCompletionFinishReason;
    /** The index of the choice in the list of choices. */
    @Expose()
    index: number;
    /** A chat completion delta generated by streamed model responses. */
    @Expose()
    @Type(() => ChatCompletionMessage)
    delta: ChatCompletionMessage;
    /** Log probability information for the choice. */
    @Expose()
    @Type(() => LogProbs)
    logprobs: LogProbs;
}

/**
 * Represents a streamed chunk of a chat completion response returned by model, based
 * on the provided input.
 */
/**
 * A streamed chunk of a chat completion response returned by the model for
 * the provided input.
 */
export class ChatCompletionChunk {
    /**
     * A unique identifier for the chat completion; every chunk of one stream
     * shares the same ID.
     */
    @Expose()
    id: string;
    /**
     * The list of completion choices; contains more than one entry when the
     * request asked for n > 1 completions.
     */
    @Expose()
    @Type(() => ChunkChoice)
    choices: ChunkChoice[];
    /**
     * Unix timestamp (in seconds) of when the chat completion was created;
     * identical across all chunks of one stream.
     */
    @Expose()
    created: number;
    /** The model used for the chat completion. */
    @Expose()
    model: string;
    /**
     * The service tier used for processing the request. Only present when the
     * service_tier parameter was specified in the request.
     */
    @Expose({ name: 'service_tier' })
    serviceTier: string;
    /**
     * Fingerprint of the backend configuration the model ran with. Combine
     * with the seed request parameter to detect backend changes that might
     * impact determinism.
     */
    @Expose({ name: 'system_fingerprint' })
    systemFingerprint: string;
    /** The object type, which is always 'chat.completion.chunk'. */
    @Expose()
    object: string;
    /**
     * Usage statistics for the completion request. Present only in the final
     * chunk, and only when StreamOptions.includeUsage is set to true.
     */
    @Expose()
    @Type(() => Usage)
    usage: Usage;
}
