import { BaseLLM } from "./baseLLM";
import { ChatMessage } from "../index";
import { streamSse } from "./stream";

/**
 * Qwen (Tongyi Qianwen) model adapter.
 * Talks to the Qwen model family through an OpenAI-compatible chat
 * completions endpoint (SSE streaming).
 */
export class Qwen extends BaseLLM {
    apiKey: string;
    modelName: string;
    url: string;

    /**
     * @param apiKey - bearer token for the Qwen endpoint
     * @param modelName - model identifier placed in every request payload
     * @param url - full URL of the OpenAI-compatible chat completions endpoint
     */
    constructor(apiKey: string, modelName: string, url: string) {
        super();
        this.apiKey = apiKey;
        this.modelName = modelName;
        this.url = url;

        // Log initialization info to the console.
        // NOTE(review): this prints the first 8 chars of the API key —
        // consider removing if console output may be captured or shared.
        console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
        console.log('🚀 [Qwen LLM] 初始化');
        console.log(`📌 模型名称: ${this.modelName}`);
        console.log(`🔗 API 地址: ${this.url}`);
        console.log(`🔑 API Key: ${this.apiKey.substring(0, 8)}...`);
        console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
    }

    /** HTTP headers for every request: JSON body + Bearer auth. */
    protected _getHeaders() {
        return {
            "Content-Type": "application/json",
            "Authorization": `Bearer ${this.apiKey}`,
        };
    }

    /**
     * Build the OpenAI-compatible request payload.
     * `max_tokens` is included only when a value was actually provided.
     */
    protected _convertArgs(messages: ChatMessage[], maxTokens?: number) {
        return {
            messages: messages,
            model: this.modelName,
            // `!= null` (not truthiness) so an explicit 0 is still forwarded.
            ...(maxTokens != null && { max_tokens: maxTokens }),
        };
    }

    /**
     * Stream raw completion text chunks from the Qwen endpoint.
     *
     * @param _messages - full chat history to send
     * @param signal - aborts the underlying fetch when triggered
     * @param maxTokens - completion token limit; defaults to 8192 when omitted
     * @throws Error when the server responds with a non-2xx status
     */
    public async *_legacystreamComplete(
        _messages: ChatMessage[],
        signal: AbortSignal,
        maxTokens?: number,
    ): AsyncGenerator<string> {
        // `??` (not `||`) so only undefined/null fall back to the default;
        // an explicit caller-supplied value is always respected.
        const effectiveMaxTokens = maxTokens ?? 8192;
        const args = this._convertArgs(_messages, effectiveMaxTokens);

        console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
        console.log('📤 [Qwen LLM] 发送请求');
        console.log(`🎯 模型: ${this.modelName}`);
        console.log(`🔗 URL: ${this.url}`);
        console.log(`📨 消息数量: ${_messages.length}`);
        const lastMsg = _messages[_messages.length - 1]?.content;
        const msgPreview = typeof lastMsg === 'string' ? lastMsg.substring(0, 100) : JSON.stringify(lastMsg).substring(0, 100);
        console.log(`💬 最后一条用户消息: ${msgPreview}...`);
        console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');

        const response = await this.fetch(new URL(this.url), {
            method: "POST",
            headers: this._getHeaders(),
            body: JSON.stringify({
                ...args,
                stream: true,
            }),
            signal,
        });

        // Fail fast on HTTP errors instead of handing an error body to the
        // SSE parser, which could silently yield nothing.
        if (!response.ok) {
            const errorBody = await response.text().catch(() => "");
            throw new Error(
                `[Qwen LLM] request failed (HTTP ${response.status}): ${errorBody}`,
            );
        }

        console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
        console.log('✅ [Qwen LLM] 开始接收响应流');
        console.log(`📊 HTTP状态: ${response.status}`);
        console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');

        let chunkCount = 0;
        for await (const value of streamSse(response)) {
            // Qwen's SSE payloads follow the OpenAI delta format.
            const text = value.choices?.[0]?.delta?.content;
            if (text) {
                chunkCount++;
                if (chunkCount % 10 === 0) {
                    console.log(`📦 [Qwen LLM] 已接收 ${chunkCount} 个数据块...`);
                }
                yield text;
            }
        }

        console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
        console.log('🏁 [Qwen LLM] 响应流结束');
        console.log(`📊 总共接收: ${chunkCount} 个数据块`);
        console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
    }

    /**
     * Stream chat deltas: wraps each text chunk from
     * `_legacystreamComplete` in an assistant-role `ChatMessage`.
     */
    public async *_streamChat(
        messages: ChatMessage[],
        signal: AbortSignal,
        maxTokens?: number,
    ): AsyncGenerator<ChatMessage> {
        for await (const content of this._legacystreamComplete(
            messages,
            signal,
            maxTokens
        )) {
            yield {
                role: "assistant",
                content,
            };
        }
    }
}

