import {
    ChatCompletionMessageParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
    ChatCompletionAssistantMessageParam,
} from 'openai/resources/chat/completions/completions';
import { LLMClient, LLMConfig } from './llm.js';
import { MCPClient, MCPConfig } from './mcp/mcp.js';
import { systemPrompt, SystemPromptConfig } from './prompts.js';
import { AssistantMessageContent } from './assistant-message/index.js'
import { parseAssistantMessage } from './assistant-message/parse-assistant-message.js'

import readline from "readline/promises";
import * as util from 'util';

/**
 * Interactive chat agent that bridges an MCP tool client and an LLM client.
 * Construct via {@link Agent.create}, which also seeds the conversation
 * history with the generated system prompt.
 */
export class Agent {
    private mcpClient: MCPClient;
    private llmClient: LLMClient;
    // Conversation history sent to the LLM on every turn; index 0 is the
    // system prompt, so the user-facing turn index is length - 1.
    private messages: ChatCompletionMessageParam[] = [];
    // Parsed content blocks of the most recent assistant reply.
    private assistantMessageContent: AssistantMessageContent[] = [];

    /**
     * Asynchronously builds an Agent: connects both clients, then installs
     * the system prompt as the first message in the history.
     */
    public static async create(mcpConfig: MCPConfig, llmConfig: LLMConfig): Promise<Agent> {
        const mcpClient = await MCPClient.create(mcpConfig);
        const llmClient = await LLMClient.create(llmConfig);
        const agent = new Agent(mcpClient, llmClient);

        await agent.buildSystemPrompt();

        return agent;
    }

    constructor(mcpClient: MCPClient, llmClient: LLMClient) {
        this.mcpClient = mcpClient;
        this.llmClient = llmClient;
    }

    /** Generates the system prompt and pushes it as the first history entry. */
    private async buildSystemPrompt(): Promise<void> {
        const systemPromptConfig: SystemPromptConfig = {
            mcpClient: this.mcpClient,
        };
        const systemPromptStr = systemPrompt(systemPromptConfig);
        const systemMessage: ChatCompletionSystemMessageParam = {
            role: "system",
            content: systemPromptStr,
        };
        // NOTE(review): echoes the full system prompt to stdout — looks like
        // debug output; confirm before shipping.
        console.log(systemPromptStr);
        this.messages.push(systemMessage);
    }

    /** Releases MCP resources; call before process exit. */
    public async cleanup(): Promise<void> {
        await this.mcpClient.cleanup();
    }

    /**
     * Interactive REPL: reads user queries from stdin, streams the
     * assistant's reply to stdout, and appends both turns to the history.
     * Typing "quit" (any case) exits; the readline interface is always
     * closed, even if the stream throws.
     */
    public async chatLoop(): Promise<void> {
        const rl = readline.createInterface({
            input: process.stdin,
            output: process.stdout,
        });

        try {
            console.log("\nAgent Client Started!");
            console.log("Type your queries or 'quit' to exit.");

            while (true) {
                const userContent = await rl.question(`\n\x1b[31mUser[${this.messages.length - 1}]:\x1b[0m `);
                if (userContent.toLowerCase() === "quit") {
                    break;
                }
                const userMessage: ChatCompletionUserMessageParam = {
                    role: "user",
                    content: userContent,
                };
                this.messages.push(userMessage);

                console.log("\n\x1b[32mAssistant:\x1b[0m ");
                const assistantStream = await this.llmClient.createChatStream(this.messages);
                let assistantContent = "";
                // Accumulated but not yet surfaced anywhere — TODO confirm
                // whether reasoning should be displayed or persisted.
                let reasoningContent = "";
                for await (const chunk of assistantStream) {
                    if (!chunk) {
                        continue;
                    }
                    switch (chunk.type) {
                        case "usage":
                            break;
                        case "reasoning":
                            // Reasoning chunks always precede assistant text.
                            reasoningContent += chunk.reasoning;
                            break;
                        case "text":
                            assistantContent += chunk.text;
                            process.stdout.write(chunk.text);
                            break;
                    }
                }
                // Parse once after the stream completes instead of re-parsing
                // the entire growing buffer on every text chunk (the parsed
                // result was only read after the loop anyway — this turns
                // accidental O(n^2) work into a single O(n) pass).
                this.assistantMessageContent = parseAssistantMessage(assistantContent);
                process.stdout.write("\n");
                console.log("\nassistant message block size: " + this.assistantMessageContent.length);
                const assistantMessage: ChatCompletionAssistantMessageParam = {
                    role: "assistant",
                    content: assistantContent,
                };
                this.messages.push(assistantMessage);
            }
        } finally {
            rl.close();
        }
    }
}