package com.sqlcheck.service.impl;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sqlcheck.config.LLMConfiguration;
import com.sqlcheck.dto.request.LLMOptimizeRequest;
import com.sqlcheck.dto.response.LLMApiResponse;
import com.sqlcheck.service.LLMApiService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.http.*;
import org.springframework.stereotype.Service;
import org.springframework.web.client.RestTemplate;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

@Service
@Slf4j
public class LLMApiServiceImpl implements LLMApiService {

    private final LLMConfiguration llmConfiguration;

    // RestTemplate configured for HTTP/2. NOTE(review): currently no call site
    // uses it — all provider calls (including the custom provider) go through
    // the default restTemplate below. Kept for wiring compatibility.
    private final RestTemplate http2RestTemplate;

    // Default RestTemplate used for all provider API calls.
    private final RestTemplate restTemplate;

    private final ObjectMapper objectMapper;

    /**
     * Explicit constructor replacing Lombok's {@code @RequiredArgsConstructor}.
     * <p>
     * Lombok does not copy a field-level {@code @Qualifier} onto the generated
     * constructor parameter (unless {@code lombok.copyableAnnotations} is
     * configured), so the qualifier on {@code http2RestTemplate} was silently
     * ignored and bean resolution depended on parameter-name fallback. Placing
     * the qualifier on the constructor parameter makes the injection of the two
     * {@link RestTemplate} beans explicit and robust.
     */
    public LLMApiServiceImpl(LLMConfiguration llmConfiguration,
            @Qualifier("http2RestTemplate") RestTemplate http2RestTemplate,
            RestTemplate restTemplate,
            ObjectMapper objectMapper) {
        this.llmConfiguration = llmConfiguration;
        this.http2RestTemplate = http2RestTemplate;
        this.restTemplate = restTemplate;
        this.objectMapper = objectMapper;
    }

    /**
     * Dispatches a prompt to the configured LLM provider (openai / claude / custom)
     * and returns the provider's response wrapped in an {@link LLMApiResponse}.
     * Never throws: all failures are converted into {@code LLMApiResponse.error(...)}.
     */
    @Override
    public LLMApiResponse callLLM(String prompt, LLMOptimizeRequest.LLMConfig config) {
        log.debug("调用大模型API，提供商: {}, 模型: {}", config.getProvider(), config.getModelName());

        try {
            long startTime = System.currentTimeMillis();

            // Look up provider configuration; bail out early if unknown.
            LLMConfiguration.ProviderConfig providerConfig = llmConfiguration.getProvider(config.getProvider());
            if (providerConfig == null) {
                return LLMApiResponse.error("未找到提供商配置: " + config.getProvider());
            }

            // Dispatch to the provider-specific implementation.
            LLMApiResponse response;
            switch (config.getProvider().toLowerCase()) {
                case "openai":
                    response = callOpenAI(prompt, config, providerConfig);
                    break;
                case "claude":
                    response = callClaude(prompt, config, providerConfig);
                    break;
                case "custom":
                    response = callCustomAPI(prompt, config, providerConfig);
                    break;
                default:
                    return LLMApiResponse.error("不支持的提供商: " + config.getProvider());
            }

            // Record elapsed time on every response, not only on success —
            // failures without timing information were hard to diagnose.
            if (response != null) {
                response.setProcessingTime(System.currentTimeMillis() - startTime);
            }

            return response;

        } catch (Exception e) {
            log.error("LLM API调用失败", e);
            return LLMApiResponse.error("API调用异常: " + e.getMessage());
        }
    }

    /**
     * Verifies connectivity to the configured provider by sending a trivial
     * prompt through {@link #callLLM}. Returns {@code true} only if the round
     * trip succeeds.
     */
    @Override
    public boolean testConnection(LLMOptimizeRequest.LLMConfig config) {
        log.debug("测试大模型连接，提供商: {}", config.getProvider());

        try {
            // A minimal prompt is sufficient to validate URL, auth and model name.
            LLMApiResponse response = callLLM("测试连接", config);
            return response != null && response.isSuccess();
        } catch (Exception e) {
            log.error("测试连接失败", e);
            return false;
        }
    }

    /**
     * Calls the OpenAI chat-completions API.
     *
     * @param prompt         user prompt, sent as a single "user" message
     * @param config         per-request model settings (model name, temperature)
     * @param providerConfig provider-level settings (API URL, API key)
     * @return parsed response, or an error response if the key is missing or the call fails
     */
    private LLMApiResponse callOpenAI(String prompt, LLMOptimizeRequest.LLMConfig config,
            LLMConfiguration.ProviderConfig providerConfig) {

        String apiUrl = providerConfig.getApiUrl();
        String apiKey = providerConfig.getApiKey();

        if (apiKey == null || apiKey.trim().isEmpty()) {
            return LLMApiResponse.error("OpenAI API密钥未配置");
        }

        try {
            // Build OpenAI chat-completions request body.
            Map<String, Object> requestBody = new HashMap<>();
            requestBody.put("model", config.getModelName());
            requestBody.put("messages", List.of(
                    Map.of("role", "user", "content", prompt)));
            // Low default temperature (0.1) keeps SQL-optimization output deterministic.
            requestBody.put("temperature", config.getTemperature() != null ? config.getTemperature() : 0.1);
            requestBody.put("max_tokens", 4000);

            // Bearer-token authentication per the OpenAI API convention.
            HttpHeaders headers = new HttpHeaders();
            headers.setContentType(MediaType.APPLICATION_JSON);
            headers.setBearerAuth(apiKey);

            HttpEntity<Map<String, Object>> entity = new HttpEntity<>(requestBody, headers);

            ResponseEntity<String> response = restTemplate.exchange(
                    apiUrl, HttpMethod.POST, entity, String.class);

            if (response.getStatusCode() == HttpStatus.OK) {
                return parseOpenAIResponse(response.getBody(), config);
            } else {
                return LLMApiResponse.error("OpenAI API响应错误: " + response.getStatusCode());
            }

        } catch (Exception e) {
            log.error("调用OpenAI API失败", e);
            return LLMApiResponse.error("OpenAI API调用失败: " + e.getMessage());
        }
    }

    /**
     * Calls the Anthropic Claude messages API.
     *
     * @param prompt         user prompt, sent as a single "user" message
     * @param config         per-request model settings
     * @param providerConfig provider-level settings (API URL, API key)
     * @return parsed response, or an error response if the key is missing or the call fails
     */
    private LLMApiResponse callClaude(String prompt, LLMOptimizeRequest.LLMConfig config,
            LLMConfiguration.ProviderConfig providerConfig) {

        String apiUrl = providerConfig.getApiUrl();
        String apiKey = providerConfig.getApiKey();

        if (apiKey == null || apiKey.trim().isEmpty()) {
            return LLMApiResponse.error("Claude API密钥未配置");
        }

        try {
            // Build Claude messages request body; max_tokens is mandatory for this API.
            Map<String, Object> requestBody = new HashMap<>();
            requestBody.put("model", config.getModelName());
            requestBody.put("max_tokens", 4000);
            requestBody.put("messages", List.of(
                    Map.of("role", "user", "content", prompt)));

            // Claude uses x-api-key plus a pinned anthropic-version header, not Bearer auth.
            HttpHeaders headers = new HttpHeaders();
            headers.setContentType(MediaType.APPLICATION_JSON);
            headers.set("x-api-key", apiKey);
            headers.set("anthropic-version", "2023-06-01");

            HttpEntity<Map<String, Object>> entity = new HttpEntity<>(requestBody, headers);

            ResponseEntity<String> response = restTemplate.exchange(
                    apiUrl, HttpMethod.POST, entity, String.class);

            if (response.getStatusCode() == HttpStatus.OK) {
                return parseClaudeResponse(response.getBody(), config);
            } else {
                return LLMApiResponse.error("Claude API响应错误: " + response.getStatusCode());
            }

        } catch (Exception e) {
            log.error("调用Claude API失败", e);
            return LLMApiResponse.error("Claude API调用失败: " + e.getMessage());
        }
    }

    /**
     * Calls a custom (OpenAI-compatible or Aliyun-style) provider API with
     * configurable headers and extra request parameters.
     * <p>
     * Header precedence: built-in defaults &lt; provider-level headers &lt;
     * per-request headers. Bearer auth is only applied when no Authorization /
     * custom auth header has been provided and an API key is configured.
     */
    private LLMApiResponse callCustomAPI(String prompt, LLMOptimizeRequest.LLMConfig config,
            LLMConfiguration.ProviderConfig providerConfig) {
        log.info("=== 开始调用自定义大模型服务商 API ===");
        log.info("请求配置 - 提供商: {}, 模型: {}, API地址: {}",
                config.getProvider(), config.getModelName(), providerConfig.getApiUrl());

        try {
            String apiUrl = providerConfig.getApiUrl();
            String apiKey = providerConfig.getApiKey();

            // local-qwen3-32b: prefix the prompt with /no_think to disable the
            // model's reasoning mode (idempotent — skipped if already present).
            String processedPrompt = prompt;
            if ("local-qwen3-32b".equals(config.getModelName())) {
                if (!prompt.startsWith("/no_think")) {
                    processedPrompt = "/no_think " + prompt;
                    log.info("为local-qwen3-32b模型添加非推理模式前缀: /no_think");
                }
            }

            // Build an OpenAI-compatible request body. Defaults (temperature 0.95,
            // top_p 0.7) mirror a previously verified working request.
            Map<String, Object> requestBody = new HashMap<>();
            requestBody.put("model", config.getModelName());
            requestBody.put("messages", List.of(
                    Map.of("role", "user", "content", processedPrompt)));
            requestBody.put("temperature", config.getTemperature() != null ? config.getTemperature() : 0.95);
            requestBody.put("top_p", 0.7);
            requestBody.put("max_tokens", config.getMaxTokens() != null ? config.getMaxTokens() : 4000);
            requestBody.put("stream", false);

            // Merge optional extra parameters: provider-level first, then
            // per-request (per-request values override provider-level ones).
            if (providerConfig.getAdditionalParams() != null && !providerConfig.getAdditionalParams().isEmpty()) {
                requestBody.putAll(providerConfig.getAdditionalParams());
            }
            if (config.getParameters() != null && !config.getParameters().isEmpty()) {
                requestBody.putAll(config.getParameters());
            }

            // Default headers; may be overridden by configured headers below.
            HttpHeaders headers = new HttpHeaders();
            headers.setContentType(MediaType.APPLICATION_JSON);
            headers.set("Connection", "keep-alive");
            headers.set("User-Agent", "Apifox/1.0.0 (https://apifox.com)");
            headers.set("Cache-Control", "no-cache");

            // Serialize once up front: the String entity gives RestTemplate an
            // exact Content-Length and lets us log the body verbatim.
            String requestBodyJson = objectMapper.writeValueAsString(requestBody);

            // 1) Provider-level headers.
            if (providerConfig.getHeaders() != null) {
                providerConfig.getHeaders().forEach(headers::set);
            }
            // 2) Per-request headers (may override provider-level ones).
            if (config.getHeaders() != null) {
                config.getHeaders().forEach(headers::set);
            }

            // Detect a custom auth scheme (apiKey / Authorization header supplied
            // via configuration) — in that case we must not add Bearer auth.
            boolean hasCustomAuth = (providerConfig.getHeaders() != null &&
                    (providerConfig.getHeaders().containsKey("apiKey") ||
                            providerConfig.getHeaders().containsKey("Authorization")))
                    ||
                    (config.getHeaders() != null &&
                            (config.getHeaders().containsKey("apiKey") ||
                                    config.getHeaders().containsKey("Authorization")));

            if (!headers.containsKey(HttpHeaders.AUTHORIZATION) &&
                    !hasCustomAuth &&
                    apiKey != null && !apiKey.trim().isEmpty()) {
                headers.setBearerAuth(apiKey);
                log.debug("使用Bearer认证方式");
            } else if (hasCustomAuth) {
                log.debug("使用自定义认证头：apiKey/appId等");
            }

            HttpEntity<?> entity = new HttpEntity<>(requestBodyJson, headers);

            // Log request details. Credential-bearing headers are masked —
            // logging the raw headers previously leaked the Bearer token / API key.
            log.info("请求URL: {}", apiUrl);
            log.info("请求头: {}", maskSensitiveHeaders(headers));
            log.info("请求体: {}", requestBodyJson);

            log.info("原始提示词长度: {} 字符，处理后提示词长度: {} 字符", prompt.length(), processedPrompt.length());
            if (log.isDebugEnabled()) {
                log.debug("原始提示词内容:\n{}", prompt);
                if (!prompt.equals(processedPrompt)) {
                    log.debug("处理后提示词内容:\n{}", processedPrompt);
                }
            }

            long requestStartTime = System.currentTimeMillis();

            log.info("正在发送HTTP/1.1请求到自定义API...");

            ResponseEntity<String> response = restTemplate.exchange(
                    apiUrl, HttpMethod.POST, entity, String.class);

            long networkTime = System.currentTimeMillis() - requestStartTime;

            log.info("响应状态码: {}", response.getStatusCode());
            log.info("响应头: {}", response.getHeaders());
            log.info("网络请求耗时: {} ms", networkTime);

            String responseBody = response.getBody();
            log.info("响应体长度: {} 字符", responseBody != null ? responseBody.length() : 0);
            log.info("完整响应体: {}", responseBody);

            if (response.getStatusCode() == HttpStatus.OK) {
                LLMApiResponse result = parseCustomApiResponse(responseBody, config);
                if (result != null && result.isSuccess()) {
                    log.info("API调用成功！生成内容长度: {} 字符",
                            result.getContent() != null ? result.getContent().length() : 0);
                    log.info("Token使用量: {}", result.getTokensUsed());
                    log.info("=== 自定义API调用完成 ===");
                } else {
                    log.error("API响应解析失败: {}", result != null ? result.getErrorMessage() : "unknown");
                }
                return result;
            } else {
                // Truncate the body to 1000 chars so a huge error page does not flood the log.
                String errorMsg = "Custom API响应错误: " + response.getStatusCode() +
                        ", 响应内容: "
                        + (responseBody != null ? responseBody.substring(0, Math.min(responseBody.length(), 1000))
                                : "null");
                log.error(errorMsg);

                return LLMApiResponse.error(errorMsg);
            }

        } catch (Exception e) {
            log.error("调用Custom API失败", e);
            return LLMApiResponse.error("Custom API调用失败: " + e.getMessage());
        }
    }

    /**
     * Returns a copy of the given headers with credential-bearing values
     * (Authorization, x-api-key, apiKey) replaced by "***", safe for logging.
     */
    private HttpHeaders maskSensitiveHeaders(HttpHeaders headers) {
        HttpHeaders copy = new HttpHeaders();
        copy.putAll(headers);
        for (String name : List.of(HttpHeaders.AUTHORIZATION, "x-api-key", "apiKey")) {
            if (copy.containsKey(name)) {
                copy.set(name, "***");
            }
        }
        return copy;
    }

    /**
     * Parses an OpenAI chat-completions response: extracts
     * {@code choices[0].message.content} and {@code usage.total_tokens}.
     */
    private LLMApiResponse parseOpenAIResponse(String responseBody, LLMOptimizeRequest.LLMConfig config) {
        try {
            JsonNode root = objectMapper.readTree(responseBody);
            JsonNode choices = root.path("choices");

            if (choices.isArray() && choices.size() > 0) {
                String content = choices.get(0).path("message").path("content").asText();
                JsonNode usage = root.path("usage");

                LLMApiResponse response = LLMApiResponse.success(content, config.getProvider(), config.getModelName());
                if (!usage.isMissingNode()) {
                    response.setTokensUsed(usage.path("total_tokens").asInt());
                }

                return response;
            } else {
                return LLMApiResponse.error("OpenAI API响应格式错误");
            }
        } catch (Exception e) {
            log.error("解析OpenAI响应失败", e);
            return LLMApiResponse.error("解析响应失败: " + e.getMessage());
        }
    }

    /**
     * Parses a Claude messages response: extracts {@code content[0].text} and
     * sums {@code usage.input_tokens + usage.output_tokens} for the token count.
     */
    private LLMApiResponse parseClaudeResponse(String responseBody, LLMOptimizeRequest.LLMConfig config) {
        try {
            JsonNode root = objectMapper.readTree(responseBody);
            JsonNode content = root.path("content");

            if (content.isArray() && content.size() > 0) {
                String text = content.get(0).path("text").asText();
                JsonNode usage = root.path("usage");

                LLMApiResponse response = LLMApiResponse.success(text, config.getProvider(), config.getModelName());
                if (!usage.isMissingNode()) {
                    response.setTokensUsed(usage.path("input_tokens").asInt() + usage.path("output_tokens").asInt());
                }

                return response;
            } else {
                return LLMApiResponse.error("Claude API响应格式错误");
            }
        } catch (Exception e) {
            log.error("解析Claude响应失败", e);
            return LLMApiResponse.error("解析响应失败: " + e.getMessage());
        }
    }

    /**
     * Parses a custom-provider API response, supporting three layouts in order:
     * OpenAI-compatible ({@code choices[0].message.content}), Aliyun native
     * ({@code output.choices[0].message.content}), and Aliyun text
     * ({@code output.text}). Also extracts token usage and estimates cost.
     */
    private LLMApiResponse parseCustomApiResponse(String responseBody, LLMOptimizeRequest.LLMConfig config) {
        log.info("开始解析自定义API响应...");

        try {
            JsonNode root = objectMapper.readTree(responseBody);
            log.info("解析后的JSON结构: {}", root.toPrettyString());

            // Provider-reported errors come back as an "error" object.
            if (root.has("error")) {
                JsonNode error = root.get("error");
                String errorMsg = error.has("message") ? error.get("message").asText() : "未知错误";
                String errorCode = error.has("code") ? error.get("code").asText() : "unknown";
                log.error("API返回错误 - 错误码: {}, 错误信息: {}", errorCode, errorMsg);
                return LLMApiResponse.error("API错误: " + errorCode + " - " + errorMsg);
            }

            String content = "";

            // Preferred: OpenAI-compatible layout (top-level "choices").
            JsonNode choices = root.path("choices");
            if (choices.isArray() && choices.size() > 0) {
                JsonNode firstChoice = choices.get(0);
                JsonNode message = firstChoice.path("message");
                content = message.path("content").asText();
                log.info("使用OpenAI兼容格式解析，提取到的内容长度: {} 字符", content.length());
                if (log.isDebugEnabled()) {
                    log.debug("提取到的内容:\n{}", content);
                }
            } else {
                // Fallback: Aliyun native layout ("output.choices").
                JsonNode output = root.path("output");
                if (!output.isMissingNode()) {
                    JsonNode outputChoices = output.path("choices");
                    if (outputChoices.isArray() && outputChoices.size() > 0) {
                        JsonNode firstChoice = outputChoices.get(0);
                        JsonNode message = firstChoice.path("message");
                        content = message.path("content").asText();
                        log.info("使用阿里云原生格式解析，提取到的内容长度: {} 字符", content.length());
                    } else {
                        // Last resort: plain "output.text".
                        content = output.path("text").asText();
                        if (!content.isEmpty()) {
                            log.info("使用阿里云text格式解析，提取到的内容长度: {} 字符", content.length());
                        }
                    }
                }

                if (content.isEmpty()) {
                    log.error("无法从响应中提取内容，尝试了OpenAI格式和阿里云原生格式");
                    return LLMApiResponse.error("响应格式错误：无法提取内容");
                }
            }

            // Token usage: "total_tokens" is authoritative; input/output are logged
            // for diagnostics only.
            JsonNode usage = root.path("usage");
            int tokensUsed = 0;
            if (!usage.isMissingNode()) {
                tokensUsed = usage.path("total_tokens").asInt(0);
                int inputTokens = usage.path("input_tokens").asInt(0);
                int outputTokens = usage.path("output_tokens").asInt(0);
                log.info("Token使用详情 - 输入: {}, 输出: {}, 总计: {}", inputTokens, outputTokens, tokensUsed);
            }

            LLMApiResponse response = LLMApiResponse.success(content, config.getProvider(), config.getModelName());
            response.setTokensUsed(tokensUsed);

            // Rough cost estimate at a flat ¥0.0001/token (¥0.1 per 1000 tokens) —
            // NOTE(review): not a real price table; confirm against the provider's billing.
            double cost = tokensUsed * 0.0001;
            response.setCost(cost);

            log.info("响应解析成功 - 内容长度: {}, Token使用: {}, 估算成本: ¥{}",
                    content.length(), tokensUsed, String.format("%.4f", cost));

            return response;

        } catch (Exception e) {
            log.error("解析API响应失败", e);
            return LLMApiResponse.error("响应解析失败: " + e.getMessage());
        }
    }

    /**
     * Extracts the SQL text between a "SQL语句:"/"原始SQL:" marker line and the
     * next "规则:"/"要求:" marker line of a prompt, dropping blank lines.
     * NOTE(review): currently unused within this class — candidate for removal
     * if no external caller is planned.
     */
    private String extractSqlFromPrompt(String prompt) {
        String[] lines = prompt.split("\n");
        StringBuilder sql = new StringBuilder();
        boolean inSqlSection = false;

        for (String line : lines) {
            if (line.contains("SQL语句:") || line.contains("原始SQL:")) {
                inSqlSection = true;
                continue;
            }
            if (inSqlSection && (line.contains("规则:") || line.contains("要求:"))) {
                break;
            }
            if (inSqlSection && !line.trim().isEmpty()) {
                sql.append(line).append("\n");
            }
        }

        return sql.toString().trim();
    }

}