import { OpenAI } from "openai";
import { 
    ChatCompletionMessageParam,
    ChatCompletionCreateParamsNonStreaming,
    ChatCompletionCreateParamsStreaming,
    ChatCompletionChunk,
    ChatCompletion,
} from "openai/resources/chat/completions/completions";
import { Stream } from "openai/streaming";
import { ApiStream } from "./stream.js"
import { LLMErrorResponse } from "./types.js";


/**
 * Connection settings for an OpenAI-compatible chat completion endpoint.
 */
export interface LLMConfig {
    /** Base URL of the API endpoint (passed to the OpenAI client as `baseURL`). */
    baseUrl: string;
    /** API key used to authenticate requests. */
    apiKey: string;
    /** Model identifier sent with every completion request. */
    model: string;
}

/**
 * Thin wrapper around the OpenAI SDK client for streaming and non-streaming
 * chat completions against an OpenAI-compatible endpoint.
 */
export class LLMClient {
    public config: LLMConfig;
    public openaiClient: OpenAI;

    /**
     * Async factory kept for API symmetry with other clients; currently it
     * just wraps the constructor and performs no asynchronous setup.
     */
    public static async create(llmConfig: LLMConfig): Promise<LLMClient> {
        return new LLMClient(llmConfig);
    }

    constructor(config: LLMConfig) {
        this.config = config;
        this.openaiClient = new OpenAI({
            baseURL: this.config.baseUrl,
            apiKey: this.config.apiKey,
        });
    }

    /**
     * Stream a chat completion, yielding text and (when the provider supplies
     * them) reasoning deltas as they arrive.
     *
     * @param messages - Conversation history to send to the model.
     * @throws Error when the provider reports an inline error object on a chunk.
     */
    public async *createChatStream(messages: Array<ChatCompletionMessageParam>): ApiStream {
        const params: ChatCompletionCreateParamsStreaming = {
            model: this.config.model,
            messages: messages,
            stream: true,
        };

        const stream: Stream<ChatCompletionChunk> = await this.openaiClient.chat.completions.create(params);

        for await (const chunk of stream) {
            // Some OpenAI-compatible providers report failures as an "error"
            // field on a chunk rather than a non-2xx HTTP response.
            if ("error" in chunk) {
                const error = chunk.error as LLMErrorResponse["error"];
                console.error(`LLM API Error: ${error?.code} - ${error?.message}`);
                // Include metadata in the error message if available.
                // (Use optional chaining consistently: `error` itself may be undefined.)
                const metadataStr = error?.metadata ? `\nMetadata: ${JSON.stringify(error.metadata, null, 2)}` : "";
                throw new Error(`LLM API Error ${error?.code}: ${error?.message}${metadataStr}`);
            }

            const delta = chunk.choices[0]?.delta;
            // Guard: chunks can arrive with an empty `choices` array (e.g.
            // usage-only chunks), making `delta` undefined; the `in` operator
            // below would then throw a TypeError at runtime.
            if (!delta) {
                continue;
            }

            if (delta.content) {
                yield {
                    type: "text",
                    text: delta.content,
                };
            }

            // Reasoning tokens are returned separately from the content
            if ("reasoning" in delta && delta.reasoning) {
                yield {
                    type: "reasoning",
                    // @ts-ignore -- "reasoning" is a provider extension not present in the SDK's Delta type
                    reasoning: delta.reasoning,
                };
            }
        }
    }

    /**
     * Run a non-streaming chat completion and return the assistant's text.
     *
     * @param messages - Conversation history to send to the model.
     * @returns The first choice's message content, or "" when the model
     *          returns no content. (Previously a null content field was
     *          string-concatenated, producing the literal string "null".)
     */
    public async createChat(messages: Array<ChatCompletionMessageParam>): Promise<string> {
        const params: ChatCompletionCreateParamsNonStreaming = {
            model: this.config.model,
            messages: messages,
            stream: false,
        };

        const completion: ChatCompletion = await this.openaiClient.chat.completions.create(params);

        // `content` is typed string | null; coalesce so callers always get a string.
        return completion.choices[0]?.message?.content ?? "";
    }
}