package com.pai4j.aigc.llm;

import com.pai4j.aigc.llm.entity.LLMModelEntity;
import com.pai4j.aigc.llm.service.LLMModelService;
import com.pai4j.aigc.llm.service.LLMUsageService;
import com.pai4j.aigc.llm.service.MetricsService;
import com.pai4j.aigc.llm.service.PricingService;
import com.pai4j.aigc.llm.service.TokenEstimator;
import com.pai4j.common.enums.LlmModelEnum;
import com.pai4j.domain.dto.aigc.ChatCompletionMessageRequest;
import com.pai4j.domain.vo.llm.ChatCompletionMessage;
import com.pai4j.domain.vo.llm.ChatCompletionResponse;
import com.pai4j.domain.vo.llm.ChatMessageRole;
import com.pai4j.domain.vo.llm.Usage;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;

import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Optional;
import java.util.UUID;

@Service
public class LlmChatService {

    @Value("${llm.config.model:kimi}")
    private String modal;

    @Autowired
    private LLMModelService modelService;

    @Autowired
    private LLMUsageService usageService;

    @Autowired
    private TokenEstimator tokenEstimator;

    @Autowired
    private PricingService pricingService;

    @Autowired
    private MetricsService metricsService;

    /**
     * 处理聊天请求并返回完整的聊天响应内容。
     * <p>
     * 该方法根据租户和用户信息查找绑定的模型配置，若未找到则使用默认模型或全局配置。
     * 然后调用对应的 LLM 服务执行聊天请求，并记录使用情况、耗时和成本等信息。
     *
     * @param chatCompletionMessageRequest 包含聊天上下文、租户ID、用户ID等信息的请求对象
     * @return 聊天模型返回的完整响应内容
     */
    public String chat(ChatCompletionMessageRequest chatCompletionMessageRequest) {
        final List<ChatCompletionMessage> messageList = Arrays.asList(
                new ChatCompletionMessage(ChatMessageRole.USER.value(), chatCompletionMessageRequest.getPrompt())
        );

        String tenantId = chatCompletionMessageRequest.getTenantId();
        String userId = chatCompletionMessageRequest.getUserId();
        String requestId = UUID.randomUUID().toString();

        // 解析模型：优先用户绑定 -> 租户默认 -> 全局 code -> application.yml 固定 modal
        Optional<LLMModelEntity> userModelOpt = Optional.empty();
        if (tenantId != null && userId != null) {
            userModelOpt = modelService.findActiveModelForUser(tenantId, userId);
        }
        LLMModelEntity modelEntity = userModelOpt
                .or(() -> tenantId != null ? modelService.getDefaultModel(tenantId) : Optional.empty())
                .or(() -> modelService.findByGlobalCode(modal))
                .orElse(null);
        // providerKey 默认为 application.yml 配置，若存在模型实体则以实体 provider 为准
        String providerKey = modal;
        LLMRuntimeConfig runtimeConfig = null;
        Long modelId = null;
        if (modelEntity != null) {
            providerKey = modelEntity.getProvider();
            modelId = modelEntity.getId();
            // parse parameters JSON for maxTokens and temperature
            Integer maxTokens = null;
            Float temperature = null;
            try {
                String params = modelEntity.getParameters();
                if (params != null && !params.isEmpty()) {
                    ObjectMapper mapper = new ObjectMapper();
                    JsonNode root = mapper.readTree(params);
                    if (root.has("maxTokens") && !root.get("maxTokens").isNull()) {
                        maxTokens = root.get("maxTokens").asInt();
                    }
                    if (root.has("temperature") && !root.get("temperature").isNull()) {
                        temperature = (float) root.get("temperature").asDouble();
                    }
                }
            } catch (Exception ignore) {}
            runtimeConfig = new LLMRuntimeConfig(
                    modelEntity.getApiKey(),
                    modelEntity.getModel(),
                    modelEntity.getBaseUrl(),
                    maxTokens,
                    temperature
            );
        }

        AbstractLLMChatService chatService = resolveProvider(providerKey);

        long start = System.currentTimeMillis();
        try {
            ChatCompletionResponse resp = (runtimeConfig != null)
                    ? chatService.chat(messageList, runtimeConfig)
                    : chatService.chat(messageList);
            long latency = System.currentTimeMillis() - start;

            String content = resp.getChoices().isEmpty() ? "" : resp.getChoices().get(0).getMessage().getContent();

            // 使用模型返回的usage；若无则估算
            Integer promptTokens = null;
            Integer completionTokens = null;
            Integer totalTokens = null;
            if (resp.getUsage() != null && resp.getUsage().getTotalTokens() > 0) {
                promptTokens = resp.getUsage().getPromptTokens();
                completionTokens = resp.getUsage().getCompletionTokens();
                totalTokens = resp.getUsage().getTotalTokens();
            } else {
                int pt = tokenEstimator.estimateTokens(chatCompletionMessageRequest.getPrompt());
                int ct = tokenEstimator.estimateTokens(content);
                promptTokens = pt;
                completionTokens = ct;
                totalTokens = pt + ct;
            }

            Integer costCents = pricingService.calcCostCents(modelEntity, promptTokens, completionTokens);

            // usage 记录（带显式tokens与成本）
            usageService.recordSuccess(
                    tenantId,
                    requestId,
                    userId,
                    modelId != null ? modelId : -1L,
                    providerKey,
                    modelEntity != null ? modelEntity.getCode() : providerKey,
                    promptTokens,
                    completionTokens,
                    totalTokens,
                    latency,
                    costCents
            );
            metricsService.recordSuccess(providerKey, tenantId, modelEntity != null ? modelEntity.getCode() : providerKey, latency);
            return content;
        } catch (Exception e) {
            long latency = System.currentTimeMillis() - start;
            usageService.recordFailure(
                    tenantId,
                    requestId,
                    userId,
                    modelId != null ? modelId : -1L,
                    providerKey,
                    modelEntity != null ? modelEntity.getCode() : providerKey,
                    "LLM_ERROR",
                    e.getMessage(),
                    latency
            );
            metricsService.recordFailure(providerKey, tenantId, modelEntity != null ? modelEntity.getCode() : providerKey, latency);
            throw new RuntimeException(e);
        }
    }

    /**
     * 处理流式聊天请求并返回一个用于接收服务端推送事件的 SseEmitter。
     * <p>
     * 与 {@link #chat(ChatCompletionMessageRequest)} 类似，但支持流式输出。
     * 响应内容通过 SSE 实时推送给客户端。
     *
     * @param chatCompletionMessageRequest 包含聊天上下文、租户ID、用户ID等信息的请求对象
     * @return 用于接收流式响应的 SseEmitter 实例
     */
    public SseEmitter chatStream(ChatCompletionMessageRequest chatCompletionMessageRequest) {
        final List<ChatCompletionMessage> messageList = Arrays.asList(
                new ChatCompletionMessage(ChatMessageRole.USER.value(), chatCompletionMessageRequest.getPrompt())
        );

        String tenantId = chatCompletionMessageRequest.getTenantId();
        String userId = chatCompletionMessageRequest.getUserId();
        String requestId = UUID.randomUUID().toString();

        Optional<LLMModelEntity> userModelOpt = Optional.empty();
        if (tenantId != null && userId != null) {
            userModelOpt = modelService.findActiveModelForUser(tenantId, userId);
        }
        LLMModelEntity modelEntity = userModelOpt
                .or(() -> tenantId != null ? modelService.getDefaultModel(tenantId) : Optional.empty())
                .or(() -> modelService.findByGlobalCode(modal))
                .orElse(null);

        String providerKey = modal; // fallback
        LLMRuntimeConfig runtimeConfig = null;
        Long modelId = null;
        if (modelEntity != null) {
            providerKey = modelEntity.getProvider();
            modelId = modelEntity.getId();
            Integer maxTokens = null;
            Float temperature = null;
            try {
                String params = modelEntity.getParameters();
                if (params != null && !params.isEmpty()) {
                    ObjectMapper mapper = new ObjectMapper();
                    JsonNode root = mapper.readTree(params);
                    if (root.has("maxTokens") && !root.get("maxTokens").isNull()) {
                        maxTokens = root.get("maxTokens").asInt();
                    }
                    if (root.has("temperature") && !root.get("temperature").isNull()) {
                        temperature = (float) root.get("temperature").asDouble();
                    }
                }
            } catch (Exception ignore) {}
            runtimeConfig = new LLMRuntimeConfig(
                    modelEntity.getApiKey(),
                    modelEntity.getModel(),
                    modelEntity.getBaseUrl(),
                    maxTokens,
                    temperature
            );
        }

        AbstractLLMChatService chatService = resolveProvider(providerKey);
        SseEmitter emitter = new SseEmitter(0L); // no timeout
        long start = System.currentTimeMillis();
        try {
            String content = (runtimeConfig != null)
                    ? chatService.chat(emitter, messageList, runtimeConfig)
                    : chatService.chat(emitter, messageList);
            long latency = System.currentTimeMillis() - start;

            // Prefer provider-reported usage; fallback to estimation
            Integer pt;
            Integer ct;
            Integer total;
            Usage providerUsage = chatService.consumeLastUsage();
            if (providerUsage != null && providerUsage.getTotalTokens() > 0) {
                pt = providerUsage.getPromptTokens();
                ct = providerUsage.getCompletionTokens();
                total = providerUsage.getTotalTokens();
            } else {
                pt = tokenEstimator.estimateTokens(chatCompletionMessageRequest.getPrompt());
                ct = tokenEstimator.estimateTokens(content);
                total = pt + ct;
            }
            Integer costCents = pricingService.calcCostCents(modelEntity, pt, ct);

            usageService.recordSuccess(
                    tenantId,
                    requestId,
                    userId,
                    modelId != null ? modelId : -1L,
                    providerKey,
                    modelEntity != null ? modelEntity.getCode() : providerKey,
                    pt,
                    ct,
                    total,
                    latency,
                    costCents
            );
            metricsService.recordSuccess(providerKey, tenantId, modelEntity != null ? modelEntity.getCode() : providerKey, latency);
        } catch (Exception e) {
            long latency = System.currentTimeMillis() - start;
            usageService.recordFailure(
                    tenantId,
                    requestId,
                    userId,
                    modelId != null ? modelId : -1L,
                    providerKey,
                    modelEntity != null ? modelEntity.getCode() : providerKey,
                    "LLM_ERROR",
                    e.getMessage(),
                    latency
            );
            metricsService.recordFailure(providerKey, tenantId, modelEntity != null ? modelEntity.getCode() : providerKey, latency);
        }
        return emitter;
    }

    /**
     * 根据提供的模型提供商名称解析并返回对应的聊天服务实现。
     * <p>
     * 支持别名映射处理，如将 "ollama" 映射到标准枚举值。
     * 若无法识别，则回退到全局默认模型。
     *
     * @param provider 模型提供商标识符
     * @return 对应的聊天服务实例
     */
    private AbstractLLMChatService resolveProvider(String provider) {
        // Map common aliases to enum keys if needed
        if (provider == null) provider = modal;
        String key = provider.toLowerCase();
        if (key.startsWith("ollama")) key = LlmModelEnum.OLLAMA.getModel();
        if (LlmModelEnum.KIMI.getModel().equals(key)
                || LlmModelEnum.DEEPSEEK.getModel().equals(key)
                || LlmModelEnum.OLLAMA.getModel().equals(key)) {
            return LLMServiceFactory.getLLMService(key);
        }
        // default fallback
        return LLMServiceFactory.getLLMService(modal);
    }

}
