import { ModelOptions, ModelRequest } from '../../model/model-protocol';
import { Message, MessageType } from '../message/message-protocol';

// Runtime symbol identifying PromptTemplate — typically used as a dependency-injection
// token; merges with the PromptTemplate interface declared below.
export const PromptTemplate = Symbol('PromptTemplate');

/**
 * The ChatOptions represent the common options, portable across different chat models.
 */
export interface ChatOptions extends ModelOptions {
    /** Identifier of the model to use — presumably a provider-specific model name; confirm with the model implementation. */
    model?: string;
    /** Frequency penalty applied during sampling (valid range is provider-specific). */
    frequencyPenalty?: number;
    /** Upper bound on the number of tokens the model may generate. */
    maxTokens?: number;
    /** Presence penalty applied during sampling (valid range is provider-specific). */
    presencePenalty?: number;
    /** Sequences that cause generation to stop when produced by the model. */
    stopSequences?: string[];
    /** Sampling temperature (valid range is provider-specific). */
    temperature?: number;
    /** Top-K sampling parameter. */
    topK?: number;
    /** Top-P (nucleus) sampling parameter. */
    topP?: number;
    /** Predicted output content; see {@link Prediction} for how a match speeds up the response. */
    prediction?: Prediction | null;
    /** Additional, non-portable options with no dedicated field above. */
    extra?: Record<string, unknown>;
}

/**
 * A prediction of (part of) the model's output, provided up front so that the
 * model can return a matching response more quickly.
 */
export interface Prediction {
    /**
     * The type of the predicted content you want to provide. This type is currently
     * always `content`.
     */
    type: 'content';

    /**
     * The content that should be matched when generating a model response. If
     * generated tokens would match this content, the entire model response can be
     * returned much more quickly.
     */
    content: PredictionContent[] | string;
}

/**
 * A single text part of a {@link Prediction}'s content.
 */
export interface PredictionContent {
    /** The text of this content part. */
    text: string;
    /** The type of this content part. Currently always `text`. */
    type: 'text';
}

/**
 * A model request carrying chat messages together with portable {@link ChatOptions}.
 */
export interface Prompt extends ModelRequest<Message[]> {
    /** The prompt contents as a single string — presumably the rendered text of the messages; confirm with implementations. */
    readonly contents: string;
    /** The chat options associated with this prompt. */
    readonly options: ChatOptions;
    /** Returns a copy of this prompt. NOTE(review): copy depth (shallow vs. deep) is implementation-defined — confirm with implementations. */
    copy(): Prompt;
}

/**
 * Optional context supplied when rendering a template or creating a {@link Prompt}
 * from one via {@link PromptTemplate}.
 */
export interface PromptTemplateContext {
    /**
     * Values available to the template — presumably substituted for its placeholders;
     * confirm with the PromptTemplate implementation. Typed as `unknown` (not `any`)
     * so consumers must narrow values before use.
     */
    variables?: Record<string, unknown>;
    /** Chat options to associate with the created prompt. */
    chatOptions?: ChatOptions;
    /** The message type to use for the resulting message(s). */
    messageType?: MessageType;
}

/**
 * Renders string templates and creates {@link Prompt}s from them.
 * Implementations are looked up via the `PromptTemplate` symbol declared above.
 */
export interface PromptTemplate {
    /**
     * Renders the given template to a plain string.
     *
     * @param template the template text
     * @param ctx optional context — its variables are presumably substituted into the template; confirm with implementations
     * @returns the rendered string
     */
    render(template: string, ctx?: PromptTemplateContext): Promise<string>;
    /**
     * Creates a {@link Prompt} from a template string or from already-prepared messages.
     *
     * @param template the template text, or the messages to wrap directly
     * @param ctx optional context providing variables, chat options, and message type
     * @returns the created prompt
     */
    create(template: string | Message[], ctx?: PromptTemplateContext): Promise<Prompt>;
}
