package com.zenithmind.chat.service.impl;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.EncodingType;
import com.zenithmind.chat.config.AiConfig;
import com.zenithmind.chat.pojo.entity.AiChat;
import com.zenithmind.chat.pojo.entity.AiChatMessage;
import com.zenithmind.chat.pojo.entity.ModelConfig;
import com.zenithmind.chat.service.AiService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Primary;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.web.client.RestTemplate;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;   


/**
 * OpenAI-backed implementation of {@link AiService}.
 *
 * <p>Supports a blocking chat completion ({@link #chat}) and an SSE streaming variant
 * ({@link #chatStream}) that separates {@code <think>…</think>} reasoning sections
 * (forwarded as "thought" events) from answer text ("answer_chunk" events).
 * Per-model API url/key overrides stored in the database (via
 * {@code AiConfig#getModelConfig}) take precedence over the static defaults.</p>
 */
@Slf4j
@Service
@Primary
@Qualifier("openAiService")
public class OpenAiServiceImpl implements AiService {

    @Autowired
    private AiConfig aiConfig;

    @Autowired
    private RestTemplate restTemplate;

    @Autowired
    private ObjectMapper objectMapper;

    // Provider id used for the per-model config lookup.
    // NOTE(review): assumes 1 identifies the OpenAI provider — confirm against AiConfig.
    private static final int OPENAI_PROVIDER_ID = 1;

    // jTokkit encoding registry; thread-safe, so shared as a singleton.
    private static final EncodingRegistry registry = Encodings.newDefaultEncodingRegistry();

    /**
     * Sends a blocking chat completion request.
     *
     * @param chat     chat session (model name, system prompt, sampling settings)
     * @param messages prior conversation turns, oldest first
     * @param content  the new user message
     * @return the assistant reply as an unsaved {@link AiChatMessage} (status 1)
     * @throws RuntimeException if the HTTP call or response parsing fails
     */
    @Override
    public AiChatMessage chat(AiChat chat, List<AiChatMessage> messages, String content) {
        try {
            ModelConfig modelConfig = aiConfig.getModelConfig(OPENAI_PROVIDER_ID, chat.getModelName());
            String apiUrl = resolveApiUrl(modelConfig);
            String apiKey = resolveApiKey(modelConfig);

            HttpHeaders headers = buildAuthHeaders(apiKey);
            List<Map<String, String>> messageList = buildMessageList(chat, messages, content);
            // FIX(consistency): honour per-chat temperature/max_tokens like chatStream does,
            // instead of hard-coding 0.7 / 2000.
            Map<String, Object> requestBody = buildRequestBody(chat, messageList, false);

            String response = restTemplate.postForObject(
                apiUrl,
                new HttpEntity<>(requestBody, headers),
                String.class
            );

            @SuppressWarnings("unchecked")
            Map<String, Object> responseMap = objectMapper.readValue(response, Map.class);
            @SuppressWarnings("unchecked")
            List<Map<String, Object>> choices = (List<Map<String, Object>>) responseMap.get("choices");
            // FIX: guard against a malformed/error response instead of an opaque NPE/IOOBE.
            if (choices == null || choices.isEmpty()) {
                throw new IllegalStateException("OpenAI response contains no choices");
            }
            @SuppressWarnings("unchecked")
            Map<String, String> message = (Map<String, String>) choices.get(0).get("message");

            // Prefer the server-reported completion token count; fall back to local counting.
            int tokenCount = extractCompletionTokens(responseMap, message.get("content"));

            AiChatMessage aiMessage = new AiChatMessage();
            aiMessage.setChatId(chat.getId());
            aiMessage.setRole("assistant");
            aiMessage.setContent(message.get("content"));
            aiMessage.setTokenCount(tokenCount);
            aiMessage.setStatus(1);
            return aiMessage;

        } catch (Exception e) {
            log.error("OpenAI API调用失败", e);
            // FIX: preserve the cause instead of discarding it.
            throw new RuntimeException("AI服务暂时不可用，请稍后重试", e);
        }
    }

    /**
     * Streams a chat completion over SSE. Raw chunks are accumulated verbatim (including
     * any {@code <think>} tags) for the persisted message, while the live events sent to
     * the client are split into "thought" and "answer_chunk" types.
     *
     * @param chat     chat session (model name, system prompt, sampling settings)
     * @param messages prior conversation turns, oldest first
     * @param content  the new user message
     * @param emitter  SSE emitter; completed (or completed with error) by this method
     * @return the assembled assistant reply, or a message describing the failure
     */
    @Override
    public AiChatMessage chatStream(AiChat chat, List<AiChatMessage> messages, String content, org.springframework.web.servlet.mvc.method.annotation.SseEmitter emitter) {
        ModelConfig modelConfig = aiConfig.getModelConfig(OPENAI_PROVIDER_ID, chat.getModelName());
        String apiUrl = resolveApiUrl(modelConfig);
        String apiKey = resolveApiKey(modelConfig);

        try {
            // Initial feedback so the client sees activity before the first token arrives.
            sendSseEvent(emitter, "thought", "正在连接到 " + chat.getModelName() + " 模型...");
            sendSseEvent(emitter, "thought", "模型正在分析您的请求...");

            HttpHeaders headers = buildAuthHeaders(apiKey);
            List<Map<String, String>> messageList = buildMessageList(chat, messages, content);
            Map<String, Object> requestBody = buildRequestBody(chat, messageList, true);

            // Dedicated RestTemplate with short timeouts so connection problems surface fast.
            // BUG FIX: the previous code overrode setConnectTimeout/setReadTimeout in an
            // anonymous subclass but never invoked them, so NO timeout was ever applied.
            // Calling the setters on the factory actually configures it.
            RestTemplate timeoutRestTemplate = new RestTemplate();
            org.springframework.http.client.SimpleClientHttpRequestFactory requestFactory =
                    new org.springframework.http.client.SimpleClientHttpRequestFactory();
            requestFactory.setConnectTimeout((int) java.time.Duration.ofSeconds(10).toMillis());
            requestFactory.setReadTimeout((int) java.time.Duration.ofSeconds(30).toMillis());
            timeoutRestTemplate.setRequestFactory(requestFactory);

            StringBuilder fullResponseContent = new StringBuilder();
            final int[] totalTokens = {0}; // single-cell array so the extractor lambda can write to it

            timeoutRestTemplate.execute(
                apiUrl,
                org.springframework.http.HttpMethod.POST,
                request -> {
                    request.getHeaders().putAll(headers);
                    objectMapper.writeValue(request.getBody(), requestBody);
                },
                response -> {
                    try (java.io.InputStream inputStream = response.getBody();
                         java.io.BufferedReader reader = new java.io.BufferedReader(new java.io.InputStreamReader(inputStream))) {

                        StringBuilder inProgressContent = new StringBuilder();
                        boolean inThinkSection = false;
                        StringBuilder thinkContent = new StringBuilder();

                        String line;
                        while ((line = reader.readLine()) != null) {
                            // OpenAI SSE lines look like "data: {json}" or "data: [DONE]".
                            if (!line.startsWith("data: ")) {
                                continue;
                            }
                            String jsonData = line.substring(6).trim();

                            if (jsonData.equals("[DONE]")) {
                                // Flush trailing thought text if the stream ended mid-thought.
                                if (inThinkSection && thinkContent.length() > 0) {
                                    sendSseEvent(emitter, "thought", thinkContent.toString());
                                }
                                sendSseEvent(emitter, "thought", "模型已完成响应生成。");
                                break;
                            }

                            try {
                                @SuppressWarnings("unchecked")
                                Map<String, Object> chunkMap = objectMapper.readValue(jsonData, Map.class);
                                @SuppressWarnings("unchecked")
                                List<Map<String, Object>> choices = (List<Map<String, Object>>) chunkMap.get("choices");
                                if (choices == null || choices.isEmpty()) {
                                    continue;
                                }
                                Map<String, Object> choice = choices.get(0);
                                @SuppressWarnings("unchecked")
                                Map<String, Object> delta = (Map<String, Object>) choice.get("delta");

                                if (delta != null && delta.containsKey("content")) {
                                    String contentChunk = (String) delta.get("content");

                                    // Always record the raw chunk first so the persisted
                                    // message keeps the full text, <think> tags included.
                                    fullResponseContent.append(contentChunk);
                                    inProgressContent.append(contentChunk);
                                    String currentContent = inProgressContent.toString();

                                    if (currentContent.contains("<think>") && !inThinkSection) {
                                        // Entering think mode.
                                        inThinkSection = true;
                                        int thinkStart = currentContent.indexOf("<think>");
                                        // Anything before <think> is regular answer text.
                                        if (thinkStart > 0) {
                                            sendSseEvent(emitter, "answer_chunk", currentContent.substring(0, thinkStart));
                                        }
                                        thinkContent = new StringBuilder();
                                        // Part of the thought may already be in this chunk.
                                        if (currentContent.length() > thinkStart + 7) { // "<think>".length() == 7
                                            String initialThinkContent = currentContent.substring(thinkStart + 7);
                                            thinkContent.append(initialThinkContent);
                                            // Stream the thought immediately rather than buffering.
                                            sendSseEvent(emitter, "thought", initialThinkContent);

                                            if (initialThinkContent.contains("</think>")) {
                                                // The same chunk also closed the think section.
                                                int thinkEnd = initialThinkContent.indexOf("</think>");
                                                inThinkSection = false;
                                                // Text after </think> is answer again.
                                                if (initialThinkContent.length() > thinkEnd + 8) { // "</think>".length() == 8
                                                    sendSseEvent(emitter, "answer_chunk", initialThinkContent.substring(thinkEnd + 8));
                                                }
                                                // Restart accumulation past the closed tag pair.
                                                inProgressContent = new StringBuilder(
                                                    initialThinkContent.substring(Math.min(initialThinkContent.length(), thinkEnd + 8))
                                                );
                                            }
                                        }
                                    } else if (inThinkSection) {
                                        // Inside a think section: accumulate and forward each
                                        // chunk as a live thought event.
                                        thinkContent.append(contentChunk);
                                        sendSseEvent(emitter, "thought", contentChunk);

                                        String currentThinkContent = thinkContent.toString();
                                        if (currentThinkContent.contains("</think>")) {
                                            int thinkEnd = currentThinkContent.indexOf("</think>");
                                            // Back to answer mode.
                                            inThinkSection = false;
                                            // Text after </think> is answer again.
                                            if (currentThinkContent.length() > thinkEnd + 8) {
                                                sendSseEvent(emitter, "answer_chunk", currentThinkContent.substring(thinkEnd + 8));
                                            }
                                            // Keep only the not-yet-processed tail after </think>.
                                            inProgressContent = new StringBuilder(
                                                currentThinkContent.substring(Math.min(currentThinkContent.length(), thinkEnd + 8))
                                            );
                                        }
                                    } else {
                                        // Plain answer mode.
                                        sendSseEvent(emitter, "answer_chunk", contentChunk);
                                    }
                                }

                                // The terminal chunk (finish_reason set) may carry usage stats.
                                if (choice.get("finish_reason") != null) {
                                    Object usage = chunkMap.get("usage");
                                    if (usage instanceof Map
                                            && ((Map<String, Object>) usage).get("completion_tokens") instanceof Number) {
                                        totalTokens[0] =
                                            ((Number) ((Map<String, Object>) usage).get("completion_tokens")).intValue();
                                    }
                                }
                            } catch (Exception e) {
                                // A single bad chunk should not kill the whole stream.
                                log.error("解析OpenAI流响应失败: {}", jsonData, e);
                            }
                        }
                    } catch (Exception e) {
                        log.error("读取OpenAI流响应失败", e);
                        try {
                            sendSseEvent(emitter, "error", "读取AI服务响应流失败: " + e.getMessage());
                        } catch (Exception ignored) { /* emitter may be dead already */ }
                        throw e;
                    }
                    return null;
                }
            );

            emitter.complete();

            // The final content intentionally keeps <think> tags and their content.
            String finalContent = fullResponseContent.toString();

            AiChatMessage aiMessage = new AiChatMessage();
            aiMessage.setChatId(chat.getId());
            aiMessage.setRole("assistant");
            aiMessage.setContent(finalContent.trim());
            aiMessage.setTokenCount(totalTokens[0] > 0 ? totalTokens[0] : countTokens(finalContent));
            aiMessage.setStatus(1);
            return aiMessage;

        } catch (Exception e) {
            log.error("OpenAI流式API调用失败", e);
            notifyStreamFailure(emitter, e);

            AiChatMessage errorResult = new AiChatMessage();
            errorResult.setChatId(chat.getId());
            errorResult.setRole("assistant");
            errorResult.setContent(describeStreamFailure(e));
            errorResult.setTokenCount(0);
            // NOTE(review): error replies carry the same status (1) as successful ones —
            // confirm whether a dedicated error status exists.
            errorResult.setStatus(1);
            return errorResult;
        }
    }

    /**
     * Best-effort failure notification: pushes error events to the client and completes
     * the emitter with error.
     * BUG FIX: the old code inspected {@code emitter.toString()} for the word "completed",
     * which depends on unspecified toString() internals. Sending on a completed emitter
     * just throws, so we simply attempt and swallow.
     */
    private void notifyStreamFailure(org.springframework.web.servlet.mvc.method.annotation.SseEmitter emitter, Exception e) {
        if (emitter == null) {
            return;
        }
        try {
            String errorMessage = isConnectFailure(e)
                    ? "连接OpenAI服务超时。请检查您的网络和API配置。"
                    : e.getMessage();
            sendSseEvent(emitter, "thought", "AI服务连接失败，请检查网络和API配置");
            sendSseEvent(emitter, "error", errorMessage);
        } catch (Exception ex) {
            log.debug("Failed to push failure events; emitter probably completed", ex);
        }
        try {
            emitter.completeWithError(e);
        } catch (Exception ex) {
            log.debug("Emitter already completed", ex);
        }
    }

    /** Builds the user-facing content persisted for a failed streaming call. */
    private String describeStreamFailure(Exception e) {
        if (isConnectFailure(e)) {
            return "OpenAI服务连接超时。请检查您的网络连接和API配置。";
        }
        return "OpenAI流式处理失败: " + e.getMessage();
    }

    /** True when the exception (or its direct cause) is a TCP connect failure. */
    private static boolean isConnectFailure(Throwable e) {
        return e instanceof java.net.ConnectException
                || e.getCause() instanceof java.net.ConnectException;
    }

    /** DB-level model override wins over the static default API URL. */
    private String resolveApiUrl(ModelConfig modelConfig) {
        if (modelConfig != null && modelConfig.getApiUrl() != null && !modelConfig.getApiUrl().isEmpty()) {
            return modelConfig.getApiUrl();
        }
        return aiConfig.getOpenAi().getApiUrl();
    }

    /** DB-level model override wins over the static default API key. */
    private String resolveApiKey(ModelConfig modelConfig) {
        if (modelConfig != null && modelConfig.getApiKey() != null && !modelConfig.getApiKey().isEmpty()) {
            return modelConfig.getApiKey();
        }
        return aiConfig.getOpenAi().getApiKey();
    }

    /** JSON content type plus bearer-token authorization. */
    private HttpHeaders buildAuthHeaders(String apiKey) {
        HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MediaType.APPLICATION_JSON);
        headers.setBearerAuth(apiKey);
        return headers;
    }

    /**
     * Assembles the OpenAI "messages" array: optional system prompt, then the history,
     * then the new user message.
     */
    private List<Map<String, String>> buildMessageList(AiChat chat, List<AiChatMessage> messages, String content) {
        List<Map<String, String>> messageList = new ArrayList<>();
        if (chat.getSystemPrompt() != null && !chat.getSystemPrompt().isEmpty()) {
            messageList.add(chatMessage("system", chat.getSystemPrompt()));
        }
        for (AiChatMessage message : messages) {
            messageList.add(chatMessage(message.getRole(), message.getContent()));
        }
        messageList.add(chatMessage("user", content));
        return messageList;
    }

    /** One {"role": ..., "content": ...} entry for the messages array. */
    private static Map<String, String> chatMessage(String role, String content) {
        Map<String, String> message = new HashMap<>();
        message.put("role", role);
        message.put("content", content);
        return message;
    }

    /**
     * Request body shared by the blocking and streaming paths; honours per-chat
     * temperature/max_tokens with 0.7 / 2000 as defaults.
     */
    private Map<String, Object> buildRequestBody(AiChat chat, List<Map<String, String>> messageList, boolean stream) {
        Map<String, Object> requestBody = new HashMap<>();
        requestBody.put("model", chat.getModelName());
        requestBody.put("messages", messageList);
        requestBody.put("temperature", chat.getTemperature() != null ? chat.getTemperature() : 0.7);
        requestBody.put("max_tokens", chat.getMaxTokens() != null ? chat.getMaxTokens() : 2000);
        if (stream) {
            requestBody.put("stream", true);
        }
        return requestBody;
    }

    /** Server-reported completion_tokens when present, otherwise a local count of the content. */
    @SuppressWarnings("unchecked")
    private int extractCompletionTokens(Map<String, Object> responseMap, String fallbackContent) {
        Object usage = responseMap.get("usage");
        if (usage instanceof Map) {
            Object completionTokens = ((Map<String, Object>) usage).get("completion_tokens");
            if (completionTokens instanceof Number) {
                return ((Number) completionTokens).intValue();
            }
        }
        return countTokens(fallbackContent);
    }

    /**
     * Serializes {type, content} to JSON and pushes it as one SSE data event.
     * Failures are logged but never propagated — a dead client must not abort the stream loop.
     */
    private void sendSseEvent(org.springframework.web.servlet.mvc.method.annotation.SseEmitter emitter, String type, String content) {
        try {
            Map<String, String> eventMap = new HashMap<>();
            eventMap.put("type", type);
            eventMap.put("content", content);
            String eventJson = objectMapper.writeValueAsString(eventMap);

            // Thought events are logged at INFO so they are visible on the console.
            if ("thought".equals(type)) {
                log.info("Sending thought event: content={}", preview(content));
            } else {
                log.debug("Sending SSE event: type={}, content={}", type, preview(content));
            }

            emitter.send(org.springframework.web.servlet.mvc.method.annotation.SseEmitter.event().data(eventJson));
        } catch (Exception e) {
            log.warn("Failed to send SSE event: type={}, content={}", type, content, e);
        }
    }

    /** First 50 characters of the content for log lines, with an ellipsis when truncated. */
    private static String preview(String content) {
        return content.length() > 50 ? content.substring(0, 50) + "..." : content;
    }

    /**
     * Counts tokens with jTokkit's cl100k_base encoding (used by current OpenAI chat
     * models); falls back to a rough estimate if encoding fails.
     *
     * @param content text to count; null/empty counts as 0
     * @return the token count, never negative
     */
    @Override
    public int countTokens(String content) {
        if (content == null || content.isEmpty()) {
            return 0;
        }
        try {
            Encoding encoding = registry.getEncoding(EncodingType.CL100K_BASE);
            return encoding.encode(content).size();
        } catch (Exception e) {
            log.error("Token计数失败", e);
            return estimateTokens(content);
        }
    }

    /**
     * Rough fallback when the encoder fails: ~1.3 tokens per whitespace-delimited word
     * plus ~1 token per non-ASCII character.
     * BUG FIX: the old word-count-only estimate returned ~2 tokens for any amount of CJK
     * text, because CJK has no whitespace word boundaries.
     */
    private int estimateTokens(String content) {
        long nonAsciiChars = content.chars().filter(c -> c > 127).count();
        String[] words = content.trim().split("\\s+");
        return (int) Math.ceil(words.length * 1.3 + nonAsciiChars);
    }
}