import { BaseLLM } from "./baseLLM";
import { ChatMessage } from "../index";
import { streamSse } from "./stream";

/**
 * GLM (Zhipu AI / 智谱AI) chat model implementation.
 * Supports the GLM-4 family via an OpenAI-compatible streaming (SSE)
 * chat-completions endpoint.
 */
export class GLM extends BaseLLM {
    apiKey: string;
    modelName: string;
    url: string;

    /**
     * @param apiKey    Bearer token placed in the Authorization header.
     * @param modelName Model identifier sent as the `model` field (e.g. "glm-4").
     * @param url       Full URL of the chat-completions endpoint.
     */
    constructor(apiKey: string, modelName: string, url: string) {
        super();
        this.apiKey = apiKey;
        this.modelName = modelName;
        this.url = url;
    }

    /** HTTP headers for API requests: JSON body + Bearer authentication. */
    protected _getHeaders() {
        return {
            "Content-Type": "application/json",
            "Authorization": `Bearer ${this.apiKey}`,
        };
    }

    /**
     * Assembles the request payload. `max_tokens` is included only for a
     * truthy value, so both `undefined` and an explicit 0 omit the field.
     */
    protected _convertArgs(messages: ChatMessage[], maxTokens?: number) {
        return {
            messages: messages,
            model: this.modelName,
            ...(maxTokens && { max_tokens: maxTokens }),
        };
    }

    /**
     * Rough token estimate used only for logging — actual consumption is
     * whatever the API bills.
     * Heuristic: ~1.4 tokens per CJK character, ~1.3 tokens per English word,
     * 1 token per code-punctuation symbol, and 0.15 tokens per remaining
     * character (whitespace, punctuation, and the Latin letters already
     * counted inside English words).
     */
    private estimateTokens(text: string): number {
        // CJK characters (each roughly 1.3-1.5 tokens).
        const chineseChars = (text.match(/[\u4e00-\u9fa5]/g) || []).length;
        // Whitespace-separated words containing at least one Latin letter.
        const englishWords = text.split(/\s+/).filter(w => /[a-zA-Z]/.test(w)).length;
        // Code punctuation (token density is typically higher in code).
        const codeChars = (text.match(/[{}()\[\];,=<>!+\-*\/%&|^~]/g) || []).length;

        // NOTE: Latin letters are not subtracted here, so English text
        // contributes both via englishWords and (at 0.15/char) via
        // otherChars — a deliberate coarse over-estimate.
        const otherChars = text.length - chineseChars - codeChars;
        return Math.ceil(
            chineseChars * 1.4 +
            englishWords * 1.3 +
            codeChars * 1.0 +
            otherChars * 0.15  // whitespace, punctuation, Latin letters, etc.
        );
    }

    /**
     * Streams a completion from the GLM API, yielding text deltas as they
     * arrive. Logs request/response statistics (token counts are estimates).
     *
     * @param _messages Chat history to send.
     * @param signal    Abort signal forwarded to fetch.
     * @param maxTokens Output token cap; defaults to 8192 (unit tests need
     *                  longer outputs).
     * @throws Error when the API responds with a non-2xx status.
     */
    public async *_legacystreamComplete(
        _messages: ChatMessage[],
        signal: AbortSignal,
        maxTokens?: number,
    ): AsyncGenerator<string> {
        // Default to 8192 tokens. `??` only falls through on null/undefined;
        // the previous `||` would also have clobbered an explicit 0.
        const effectiveMaxTokens = maxTokens ?? 8192;
        const args = this._convertArgs(_messages, effectiveMaxTokens);

        // Estimate input tokens for the log summary below.
        let totalInputChars = 0;
        let inputTokenEstimate = 0;
        for (const msg of _messages) {
            if (msg.content) {
                // Hoist the string/JSON conversion so it runs once per message.
                const contentText = typeof msg.content === 'string'
                    ? msg.content
                    : JSON.stringify(msg.content);
                totalInputChars += contentText.length;
                inputTokenEstimate += this.estimateTokens(contentText);
            }
        }

        // Log which model/endpoint is actually being called.
        console.log(`[GLM] 🚀 正在调用模型: ${this.modelName}`);
        console.log(`[GLM] 📍 API URL: ${this.url}`);
        console.log(`[GLM] 📊 请求参数: model=${this.modelName}, max_tokens=${effectiveMaxTokens}`);
        console.log(`[GLM] 📥 输入统计: ${_messages.length}条消息, 约${(totalInputChars/1024).toFixed(1)}KB字符, 估算约${inputTokenEstimate.toLocaleString()}输入tokens`);
        console.log(`[GLM] 💰 预计总消耗: 输入~${inputTokenEstimate.toLocaleString()} + 输出最多${effectiveMaxTokens.toLocaleString()} = 约${(inputTokenEstimate + effectiveMaxTokens).toLocaleString()} tokens`);

        const requestBody = {
            ...args,
            stream: true,
        };
        console.log(`[GLM] 📤 请求体摘要: ${JSON.stringify({ ...requestBody, messages: `[${requestBody.messages.length}条消息]` })}`);

        const response = await this.fetch(new URL(this.url), {
            method: "POST",
            headers: this._getHeaders(),
            body: JSON.stringify(requestBody),
            signal,
        });

        console.log(`[GLM] 📥 收到响应: status=${response.status}, statusText=${response.statusText}`);

        // Fail fast with a readable error instead of handing an error
        // response (bad key, bad URL, rate limit) to the SSE parser.
        if (!response.ok) {
            const errorBody = await response.text().catch(() => "");
            throw new Error(`GLM API request failed (${response.status} ${response.statusText}): ${errorBody}`);
        }

        // Accumulate the streamed text so output tokens can be estimated.
        let outputText = '';
        let outputChunks = 0;
        for await (const value of streamSse(response)) {
            // GLM response format mirrors OpenAI: choices[0].delta.content.
            const text = value.choices?.[0]?.delta?.content;
            if (text) {
                outputText += text;
                outputChunks++;
                yield text;
            }
        }

        // Final usage summary (estimates only).
        const outputChars = outputText.length;
        const outputTokenEstimate = this.estimateTokens(outputText);
        const totalEstimatedTokens = inputTokenEstimate + outputTokenEstimate;
        console.log(`[GLM] ✅ 生成完成统计:`);
        console.log(`[GLM]    - 输出: 约${(outputChars/1024).toFixed(1)}KB字符, 估算约${outputTokenEstimate.toLocaleString()}输出tokens`);
        console.log(`[GLM]    - 总消耗估算: 输入~${inputTokenEstimate.toLocaleString()} + 输出~${outputTokenEstimate.toLocaleString()} = 约${totalEstimatedTokens.toLocaleString()} tokens`);
        console.log(`[GLM]    - ⚠️ 注意: 这是粗略估算，实际消耗以API计费为准（通常输入tokens会被完整计费）`);
    }

    /**
     * Chat-shaped streaming wrapper: forwards to `_legacystreamComplete`
     * and wraps each text delta in an assistant ChatMessage.
     */
    public async *_streamChat(
        messages: ChatMessage[],
        signal: AbortSignal,
        maxTokens?: number,
    ): AsyncGenerator<ChatMessage> {
        for await (const content of this._legacystreamComplete(
            messages,
            signal,
            maxTokens
        )) {
            yield {
                role: "assistant",
                content,
            };
        }
    }
}

