import {
    BaseChatModel,
    type BaseChatModelParams,
} from "@langchain/core/language_models/chat_models";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, type BaseMessage } from "@langchain/core/messages";
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
import { createParser, type EventSourceMessage } from 'eventsource-parser'
import axios from "axios";
import { TokenUsage } from "./modelUsage";

/**
 * Constructor options for {@link LLMModel}. Extends the base LangChain chat
 * model params with endpoint credentials, sampling controls, and (for the
 * image-model mode) image-generation parameters.
 */
interface CustomChatModelInput extends BaseChatModelParams {
    /** Full HTTP endpoint URL the request is POSTed to. */
    url: string;
    /** API key, sent as a `Bearer` token in the Authorization header. */
    key: string;
    /** Model identifier sent in the request body. */
    model: string;
    /** Max tokens to generate; defaults to 4096 * 3 when omitted. */
    maxTokens?: number;
    /** Sampling temperature; defaults to 0.7 when omitted. */
    temperature?: number;
    /** Reasoning ("thinking") mode forwarded to the backend. */
    thinking?: "disabled" | "enabled" | "auto";
    /** When true, requests are built for the image-generation API instead of chat. */
    isImageModel?: boolean;

    /**
     * Output image dimensions in pixels; must fall within
     * [512 x 512, 2048 x 2048].
     */
    size?: "1024x1024" | "864x1152" | "1152x864" | "1280x720" | "720x1280" | "832x1248" | "1248x832" | "1512x648" | string;
    /**
     * Random seed controlling generation randomness. Range: [-1, 2147483647].
     * Omit to let the backend choose a random seed; reuse the same seed value
     * to reproduce identical output.
     */
    seed?: number;
    /**
     * Guidance scale: how closely the generated image must follow the prompt.
     * Larger values give the model less freedom and stronger prompt adherence.
     * Range: a float in [1, 10].
     */
    guidance_scale?: number;

    /**
     * Whether to watermark generated images.
     * false: no watermark.
     * true: add an "AI generated" watermark in the bottom-right corner.
     */
    watermark?: boolean;
}

/**
 * Shape of the chunk objects delivered to {@link LLMModel.OnStreamOut} for
 * every parsed SSE event during streaming.
 */
export interface ILLMModelChunk {
    /** Message type discriminator. */
    msgType: string;
    /** Incremental response text carried by this chunk. */
    text: string;
    /** Full response text accumulated so far, when provided. */
    fullText?: string;
    /** Incremental reasoning ("thinking") text carried by this chunk. */
    thinking?: string;
    /** Full reasoning text accumulated so far, when provided. */
    fullThinking?: string;
    /**
     * Token usage reported by the backend, when present. Previously declared
     * as a bare `usage;` (implicit `any`), which fails under `noImplicitAny`;
     * now typed to the payload the stream parser actually accumulates, and
     * optional because malformed events surface an empty chunk.
     */
    usage?: {
        completion_tokens?: number;
        prompt_tokens?: number;
        total_tokens?: number;
        [key: string]: unknown;
    };
}

/**
 * LangChain chat model backed by an OpenAI-style HTTP API (chat completions
 * with optional streaming/tools/reasoning), plus an image-generation mode
 * toggled by `isImageModel`.
 */
export class LLMModel extends BaseChatModel {
    url: string;
    key: string;
    model: string;
    /** Reasoning ("thinking") text accumulated over the current stream. */
    thinkingRes = "";
    /** Plain response text accumulated over the current stream. */
    lastFullResponse: string = "";
    /** Token usage accumulated across requests. */
    totalUsage = new TokenUsage();
    tool_calls = [];
    maxTokens?: number;
    temperature: number = 0.7;
    tools = [];
    thinking?: "disabled" | "enabled" | "auto";
    max_req_err = 3;
    size: string;
    seed: number;
    guidance_scale: number;
    watermark: boolean;
    prompt?: string;
    /** Set to true by callers to stop consuming an in-flight stream. */
    isAbort = false;
    image?: string[];
    isImgModel = false;

    /** Callback invoked with every parsed SSE event during streaming. */
    OnStreamOut: (chunk: ILLMModelChunk) => void;

    // SSE parser; fed raw decoded chunks from _streamResponseChunks.
    private parser = createParser({
        onEvent: (event: EventSourceMessage) => {
            try {
                const data = JSON.parse(event.data);
                // FIX: optional-chain `choices` — the final usage-only chunk
                // of a stream has no `choices` and previously threw here,
                // surfacing as an empty chunk to the listener.
                if (data && data.choices?.length) {
                    const delta = data.choices[0].delta ?? {};
                    // FIX: `?? ""` — `content` is absent on reasoning/tool
                    // deltas; the old code wrote `undefined` to stdout (which
                    // throws) and appended the literal string "undefined"
                    // to lastFullResponse.
                    const text = delta.content ?? "";
                    const reasoning_content = delta.reasoning_content;
                    if (reasoning_content) {
                        if (process.env.NODE_ENV == "dev") {
                            // Blue ANSI color for reasoning output in dev.
                            process.stdout.write('\x1b[34m' + reasoning_content + '\x1b[0m');
                        }
                        this.thinkingRes += reasoning_content;
                        data.thinking = reasoning_content;
                        // data.fullThinking = this.thinkingRes;
                    }
                    data.text = text;
                    if (text && process.env.NODE_ENV == "dev") {
                        process.stdout.write(text);
                    }
                    this.lastFullResponse += text;

                    if (delta.tool_calls) {
                        let tool_call = delta.tool_calls[0];
                        console.log(`${tool_call.function.id} 工具名称：${tool_call.function.name}，参数：${tool_call.function.arguments}`);
                    }
                }

                if (data.usage) {
                    if (this.totalUsage) {
                        // FIX: `?? 0` so a partial usage payload cannot poison
                        // the accumulators with NaN.
                        this.totalUsage.completion_tokens += data.usage.completion_tokens ?? 0;
                        this.totalUsage.prompt_tokens += data.usage.prompt_tokens ?? 0;
                        this.totalUsage.total_tokens += data.usage.total_tokens ?? 0;
                    }
                    else {
                        this.totalUsage = data.usage;
                    }
                }
                this.OnStreamOut && this.OnStreamOut(data);
            } catch (error) {
                // Malformed event: still notify the listener with an empty
                // chunk so downstream consumers see consistent chunk counts.
                this.OnStreamOut && this.OnStreamOut({} as any);
            }
        }
    })

    /** Convert LangChain messages into the OpenAI-style role/content format. */
    private formatMessages(messages: BaseMessage[]): any[] {
        return messages.map((msg) => {
            if (msg instanceof HumanMessage) {
                return { role: "user", content: msg.content };
            } else if (msg instanceof SystemMessage) {
                return { role: "system", content: msg.content };
            } else if (msg instanceof AIMessage) {
                return { role: "assistant", content: msg.content };
            } else {
                throw new Error(`不支持的消息类型: ${msg._getType()}`);
            }
        });
    }

    constructor(fields: CustomChatModelInput) {
        super(fields);
        this.url = fields.url;
        this.key = fields.key;
        this.model = fields.model;
        // FIX: `??` instead of truthiness checks throughout, so explicit
        // falsy-but-valid values are not discarded.
        this.maxTokens = fields.maxTokens ?? 4096 * 3;
        // FIX: `if (fields.temperature)` silently ignored a valid
        // temperature of 0; keep the 0.7 field default only when omitted.
        this.temperature = fields.temperature ?? this.temperature;
        // FIX: the provided thinking mode was never assigned — only the
        // "disabled" default was — so `thinking: { type: undefined }` was
        // sent whenever a caller opted in.
        this.thinking = fields.thinking ?? "disabled";

        this.size = fields.size;
        this.seed = fields.seed;
        this.guidance_scale = fields.guidance_scale;
        this.watermark = fields.watermark;
        // FIX: coerce to a real boolean instead of assigning undefined.
        this.isImgModel = fields.isImageModel ?? false;
    }

    _llmType() {
        return "deepseek";
    }

    bind(params: any) {
        // FIX: guard — binding without a `tools` array used to throw on `.map`.
        if (params?.tools) {
            this.tools = params.tools.map(x => {
                return {
                    type: "function",
                    function: x
                }
            });
        }
        return super.bind(params);
    }

    /**
     * Build and send the HTTP request: chat-completions body by default, or
     * an image-generation body when `isImgModel` is set.
     * @param messages already-typed LangChain messages (ignored by the
     *                 image-model path, which uses `this.prompt`).
     * @param stream   when true, request SSE and set axios responseType.
     * @throws re-throws any axios error after logging its payload.
     */
    private async reqAiBase(messages: any[], stream: boolean = false) {
        const formattedMessages = this.formatMessages(messages);
        let reqData: any = {
            model: this.model,
            stream,
            max_tokens: this.maxTokens,
            temperature: this.temperature,
            messages: formattedMessages,
            stream_options: {
                include_usage: true
            },
            tools: this.tools,
            thinking: {
                type: this.thinking
            }
        }

        if (this.isImgModel) {
            reqData = {
                model: this.model,
                seed: this.seed,
                guidance_scale: this.guidance_scale,
                // FIX: honour the configured watermark flag — it was
                // hard-coded to false while the `watermark` field went unused.
                watermark: this.watermark ?? false,
                response_format: "url",
                prompt: this.prompt
            }

            if (this.size) {
                reqData.size = this.size;
            }

            if (this.image && this.image.length > 0) {
                reqData.image = this.image;
            }
        }

        const defaultHeaders = {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${this.key}`
        };

        let cfg = {
            method: 'POST',
            url: this.url,
            data: reqData,
            headers: defaultHeaders
        } as any;

        if (stream) {
            cfg.responseType = 'stream';
        }

        let res;
        try {
            res = await axios.request(cfg);
        } catch (error) {
            console.log("请求失败：", this.model, this.max_req_err);
            // FIX: optional-chain — a network-level failure carries no
            // `response`, and the old access threw a second, misleading
            // TypeError that masked the real error.
            console.log(error?.response?.data?.error ?? error);
            throw error;
        }

        return res;
    }

    /**
     * Non-streaming generation. Handles both chat responses (`choices`) and
     * image-model responses (payload under `data`, serialized to JSON text).
     */
    async _generate(messages: BaseMessage[],
        options: this["ParsedCallOptions"],
        runManager?: CallbackManagerForLLMRun): Promise<ChatResult> {
        const response = await this.reqAiBase(messages, false);
        let tools;
        let content;
        let usage = response.data.usage;

        if (response.data.choices) {
            content = response.data.choices[0].message.content;
            tools = response.data.choices[0].message.tool_calls;
            // FIX: keep tool_calls an array so consumers can always iterate.
            this.tool_calls = tools ?? [];
        }

        // Image-model responses carry their payload under `data`.
        if (response.data.data) {
            content = JSON.stringify(response.data.data);
        }

        return {
            generations: [
                {
                    message: new AIMessage(content, { usage, tools }),
                    text: content,
                },
            ],
        };
    }

    /** Simple non-streaming string call; rejects multimodal first messages. */
    async _call(
        messages: BaseMessage[],
        options: this["ParsedCallOptions"],
        runManager?: CallbackManagerForLLMRun
    ): Promise<string> {
        if (!messages.length) {
            throw new Error("No messages provided.");
        }
        // Pass `runManager?.getChild()` when invoking internal runnables to enable tracing
        // await subRunnable.invoke(params, runManager?.getChild());
        if (typeof messages[0].content !== "string") {
            throw new Error("Multimodal messages are not supported.");
        }

        const response = await this.reqAiBase(messages);
        return response.data;
    }

    /**
     * Streaming generation: feeds each decoded chunk to the SSE parser (which
     * drives `OnStreamOut`) and also yields the raw text as LangChain chunks.
     */
    async *_streamResponseChunks(
        messages: BaseMessage[],
        options: this["ParsedCallOptions"],
        runManager?: CallbackManagerForLLMRun
    ): AsyncGenerator<ChatGenerationChunk> {
        if (!messages.length) {
            throw new Error("No messages provided.");
        }
        // if (typeof messages[0].content !== "string") {
        //     throw new Error("Multimodal messages are not supported.");
        // }
        const response = await this.reqAiBase(messages, true);
        const stream = response.data;

        const decoder = new TextDecoder();
        for await (const chunk of stream) {
            if (this.isAbort) {
                break;
            }
            // FIX: `{ stream: true }` — without it, multi-byte UTF-8
            // sequences split across network chunks are decoded as
            // replacement characters (garbles CJK output).
            const textChunk = decoder.decode(chunk, { stream: true });
            this.parser.feed(textChunk);
            yield new ChatGenerationChunk({
                message: new AIMessageChunk({
                    content: textChunk
                }),
                text: textChunk,
            });
            // FIX: await the async callback so token notifications
            // keep their order relative to yielded chunks.
            await runManager?.handleLLMNewToken(textChunk);
        }
    }
}