const axios = require('axios');
const vscode = require('vscode');

/**
 * 定义API流式数据类型
 * @typedef {AsyncGenerator<ApiStreamChunk>} ApiStream
 * @typedef {ApiStreamTextChunk|ApiStreamUsageChunk} ApiStreamChunk
 * 
 * @typedef {Object} ApiStreamTextChunk
 * @property {'text'} type
 * @property {string} text
 * 
 * @typedef {Object} ApiStreamUsageChunk
 * @property {'usage'} type
 * @property {number} inputTokens
 * @property {number} outputTokens
 */

class AIService {
    /**
     * Streaming AI chat service supporting Qwen (DashScope) and DeepSeek
     * backends through their OpenAI-compatible APIs.
     */
    constructor() {
        // Handle to an in-flight HTTP response; stopResponse() destroys it.
        this.currentResponse = null;
        // Default model used when the caller does not specify one.
        this.selectedModel = "qwen-plus";
        // Whether to request the model's reasoning ("thinking") stream.
        this.enableReasoning = false;
        // Cooperative cancellation flag checked inside the streaming loops.
        this.isStopped = false;
    }

    /**
     * Stream a chat completion from a Qwen model.
     *
     * Emits to `onData`:
     *   - "THINKING:<text>"  reasoning tokens (only before the answer starts)
     *   - "THINKING_END"     once, when the answer begins after reasoning
     *   - "<text>"           answer tokens
     *   - "STREAM_END"       exactly once, on completion, stop, or error
     *
     * @param {Array<{role: string, content: string}>} messages - Chat history.
     * @param {string} model - Qwen model identifier.
     * @param {boolean} enableReasoning - Request the reasoning stream.
     * @param {(data: string) => void} onData - Incremental data callback.
     * @throws {Error} If no API key is configured or the API call fails
     *   (errors during a user-initiated stop are swallowed intentionally).
     */
    async streamQwenResponse(messages, model, enableReasoning, onData) {
        this.isStopped = false;
        const config = vscode.workspace.getConfiguration('qwenChat');
        const apiEndpoint = config.get('apiEndpoint') || "https://dashscope.aliyuncs.com/compatible-mode/v1";
        const apiKey = config.get('apiKey');
        if (!apiKey) throw new Error('请在设置中配置 Qwen API 密钥 (qwenChat.apiKey)');
        const OpenAI = require('openai');
        const openai = new OpenAI.OpenAI({ apiKey, baseURL: apiEndpoint });
        let reasoningContent = '';
        let answerContent = '';
        let isAnswering = false;
        let hasThinking = false;
        try {
            const stream = await openai.chat.completions.create({
                model,
                messages,
                stream: true,
                enable_thinking: enableReasoning
            });
            for await (const chunk of stream) {
                if (this.isStopped) {
                    onData("STREAM_END");
                    return;
                }
                if (!chunk.choices?.length) {
                    // A choice-less chunk carrying usage stats marks the end of
                    // the stream. Return here so STREAM_END is emitted exactly
                    // once (previously this fell through to the post-loop
                    // STREAM_END as well, emitting it twice).
                    if (chunk.usage) {
                        onData("STREAM_END");
                        return;
                    }
                    continue;
                }
                const delta = chunk.choices[0].delta;
                // `!= null` matches both null and undefined.
                if (delta.reasoning_content != null) {
                    if (!isAnswering) {
                        onData("THINKING:" + delta.reasoning_content);
                        hasThinking = true;
                    }
                    reasoningContent += delta.reasoning_content;
                }
                if (delta.content) {
                    if (!isAnswering) {
                        isAnswering = true;
                        if (hasThinking) {
                            onData("THINKING_END");
                        }
                    }
                    onData(delta.content);
                    answerContent += delta.content;
                }
            }
            onData("STREAM_END");
        } catch (error) {
            // A stop-triggered abort is expected; only surface real errors.
            if (!this.isStopped) {
                onData("STREAM_END");
                throw error;
            }
        }
    }

    /**
     * Stream a chat completion from a DeepSeek model.
     *
     * Normalizes the message history (DeepSeek requires strictly alternating
     * user/assistant turns and a trailing user message), then streams the
     * response using the same onData protocol as streamQwenResponse.
     *
     * @param {Array<{role: string, content: string}>} messages - Chat history.
     * @param {string} model - Requested model name (only used to pick between
     *   'deepseek-reasoner' and 'deepseek-chat' via enableReasoning).
     * @param {boolean} enableReasoning - Use the reasoner model when true.
     * @param {(data: string) => void} onData - Incremental data callback.
     * @returns {Promise<string>} The accumulated answer text.
     * @throws {Error} If no API key is configured or the API call fails.
     */
    async streamDeepSeekResponse(messages, model, enableReasoning, onData) {
        this.isStopped = false;
        const apiEndpoint = "https://api.deepseek.com";
        // SECURITY FIX: the API key was previously hard-coded in source
        // (a leaked secret). It is now read from user settings, mirroring
        // the Qwen path.
        const config = vscode.workspace.getConfiguration('qwenChat');
        const apiKey = config.get('deepseekApiKey');
        if (!apiKey) throw new Error('请在设置中配置 DeepSeek API 密钥 (qwenChat.deepseekApiKey)');

        const OpenAI = require('openai');
        const openai = new OpenAI.OpenAI({
            apiKey: apiKey,
            baseURL: apiEndpoint
        });

        const deepSeekModel = enableReasoning ? 'deepseek-reasoner' : 'deepseek-chat';

        // BUG FIX: declared outside the try so the catch block can return it.
        // Previously it was `let content` inside the try, so the
        // `return content` in the catch threw a ReferenceError.
        let content = '';
        try {
            // Normalize history: DeepSeek rejects consecutive messages with
            // the same role, so same-role neighbors are merged.
            const processedMessages = [];
            let lastRole = null;

            // The system message (if any) always goes first.
            const systemMessage = messages.find(msg => msg.role === 'system');
            if (systemMessage) {
                processedMessages.push({
                    role: 'system',
                    content: systemMessage.content
                });
            }

            for (const msg of messages) {
                if (msg.role === 'system') continue; // already handled above

                // Anything that is not a user message is sent as assistant.
                const role = msg.role === 'user' ? 'user' : 'assistant';

                if (lastRole === role) {
                    // Same role as the previous turn: merge the content.
                    processedMessages[processedMessages.length - 1].content += '\n' + msg.content;
                } else {
                    processedMessages.push({
                        role: role,
                        content: msg.content
                    });
                    lastRole = role;
                }
            }

            // The API requires the final turn to come from the user.
            if (processedMessages.length > 0 && processedMessages[processedMessages.length - 1].role !== 'user') {
                processedMessages.push({
                    role: 'user',
                    content: '请继续，如果任务完成，请使用<attempt_completion>标签标记任务完成,不要重复回答我'
                });
            }

            const response = await openai.chat.completions.create({
                model: deepSeekModel,
                messages: processedMessages,
                stream: true
            });

            let accumulatedReasoningContent = '';
            let isAnswering = false;
            let hasThinking = false;

            for await (const chunk of response) {
                if (this.isStopped) {
                    onData("STREAM_END");
                    return content;
                }

                const chunkContent = chunk.choices[0]?.delta?.content || '';
                const currentReasoningContent = chunk.choices[0]?.delta && 'reasoning_content' in chunk.choices[0].delta
                    ? chunk.choices[0].delta.reasoning_content
                    : '';

                // Reasoning tokens (only the reasoner model emits these).
                if (currentReasoningContent && deepSeekModel === 'deepseek-reasoner') {
                    if (!isAnswering) {
                        onData("THINKING:" + currentReasoningContent);
                        hasThinking = true;
                    }
                    accumulatedReasoningContent += currentReasoningContent;
                }
                // Answer tokens.
                else if (chunkContent) {
                    if (!isAnswering) {
                        isAnswering = true;
                        if (hasThinking) {
                            onData("THINKING_END");
                        }
                    }
                    content += chunkContent;
                    onData(chunkContent);
                }
            }

            if (accumulatedReasoningContent) {
                console.log("思考过程:", accumulatedReasoningContent);
            }
            onData("STREAM_END");
            return content;
        } catch (error) {
            // A stop-triggered abort is expected; only surface real errors.
            if (!this.isStopped) {
                console.error('DeepSeek API调用错误:', error);
                onData("STREAM_END");
                throw error;
            }
            return content;
        }
    }

    /**
     * Unified streaming entry point; builds the message list and dispatches
     * to the appropriate backend based on the model name.
     *
     * @param {?string} prompt - Single-shot user prompt (ignored when
     *   `context.messages` supplies a history).
     * @param {Object} [context] - Optional request context.
     * @param {string} [context.model] - Model override.
     * @param {string} [context.systemPrompt] - System prompt text.
     * @param {string} [context.editorInfo] - Editor context appended to the
     *   system message.
     * @param {Array} [context.messages] - Prior conversation history.
     * @param {(data: string) => void} onData - Incremental data callback.
     * @throws {Error} For unsupported model names.
     */
    async streamResponse(prompt, context = {}, onData) {
        const model = context.model || this.selectedModel || "qwen-plus";
        const enableReasoning = this.enableReasoning;
        // BUG FIX: previously only 'deepseek-chat' and 'deepseek-r1' were
        // recognized, even though this service itself uses
        // 'deepseek-reasoner'; match any deepseek-* model.
        const isDeepSeek = model.startsWith('deepseek');
        // `includes` subsumes the old redundant startsWith||includes check.
        const isQwen = model.includes('qwen');

        // Guard against an undefined system prompt becoming the literal
        // string "undefined" when editor info is appended below.
        const systemPrompt = context.systemPrompt ?? '';

        const messages = [
            { role: "system", content: systemPrompt }
        ];

        // Fold editor context into the system message when provided.
        if (context.editorInfo) {
            messages[0].content += `\n\n编辑器上下文：\n${context.editorInfo}`;
        }

        // Detects a tool-call result message. NOTE: `typeof null === 'object'`,
        // so the null check is required before touching `.type`.
        const isToolResult = (msg) =>
            msg.role === 'function' ||
            (typeof msg.content === 'object' && msg.content !== null && msg.content.type === 'tool_result');

        if (context.messages && Array.isArray(context.messages) && context.messages.length > 0) {
            // Drop duplicate text messages (tool results are always kept).
            const seenMessages = new Set();
            const filteredMessages = context.messages.filter(msg => {
                if (isToolResult(msg)) {
                    return true;
                }
                const key = `${msg.role}:${msg.content}`;
                if (seenMessages.has(key)) {
                    return false;
                }
                seenMessages.add(key);
                return true;
            });

            // Render tool-call results as the XML format the prompt expects.
            const processedMessages = filteredMessages.map(msg => {
                if (isToolResult(msg)) {
                    const content = typeof msg.content === 'object' && msg.content !== null
                        ? msg.content
                        : JSON.parse(msg.content);
                    return {
                        role: 'assistant',
                        content: `<tool_result>
<tool>${content.tool}</tool>
<status>${content.approved ? '已执行' : '已拒绝'}</status>
<result>${content.approved ?
    (content.result?.success ? '成功' : '失败') + (content.result?.error ? `: ${content.result.error}` : '') :
    '用户拒绝: ' + (content.error || '未提供原因')}</result>
</tool_result>`
                    };
                }
                return msg;
            });

            messages.push(...processedMessages);
        } else if (prompt) {
            messages.push({ role: "user", content: prompt });
        }

        if (isQwen) {
            console.log(`Qwen模型流式响应:${enableReasoning}`)
            return await this.streamQwenResponse(messages, model, enableReasoning, onData);
        } else if (isDeepSeek) {
            return await this.streamDeepSeekResponse(messages, model, enableReasoning, onData);
        } else {
            throw new Error('暂不支持的模型类型');
        }
    }

    /**
     * Create a message: calls the model directly with a prepared history.
     *
     * @param {string} systemPrompt - System prompt text.
     * @param {Array} messages - Conversation history.
     * @param {Function} onData - Incremental data callback.
     * @returns {Promise<string|undefined>} The streamed answer (when the
     *   backend returns one).
     */
    async createMessage(systemPrompt, messages, onData) {
        const model = this.selectedModel || "qwen-plus";

        const context = {
            systemPrompt,
            messages,
            model,
            enableReasoning: this.enableReasoning
        };

        return await this.streamResponse(null, context, onData);
    }

    /**
     * Stop the current streaming response.
     * @returns {Promise<boolean>} true when the stop was requested.
     */
    async stopResponse() {
        try {
            this.isStopped = true;
            if (this.currentResponse) {
                // Guard: currentResponse may not be an axios-style response
                // with a destroyable `.data` stream.
                this.currentResponse.data?.destroy?.();
                this.currentResponse = null;
            }
            return true;
        } catch (error) {
            console.error('Error stopping response:', error);
            throw error;
        }
    }

    /**
     * Clear conversation history. This service keeps no history of its own,
     * so this only logs and reports success.
     * @returns {boolean} Whether the clear succeeded.
     */
    clearConversationHistory() {
        try {
            console.log('清除对话历史');
            return true;
        } catch (error) {
            console.error('清除对话历史失败:', error);
            return false;
        }
    }

    /**
     * Get API stream token usage.
     * @returns {Promise<{inputTokens: number, outputTokens: number}>} A
     *   placeholder estimate; a real implementation should report the usage
     *   returned by the API.
     */
    async getApiStreamUsage() {
        return {
            inputTokens: 0,
            outputTokens: 0
        };
    }
}

// CommonJS export of the service class (not an instance).
module.exports = AIService;