package com.learn.english.utils;

import com.learn.english.constants.APIConstant;
import com.learn.english.constants.CacheConstants;
import com.learn.english.constants.ChatConstants;
import com.learn.english.enums.TimeEnum;
import com.learn.english.mapper.ChatMessagesMapper;
import com.learn.english.model.param.ChatParameters;
import com.learn.english.model.param.Emitters;
import com.learn.english.model.param.MReply;
import com.learn.english.model.param.Messages;
import com.learn.english.service.RedisService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import org.springframework.web.reactive.function.client.WebClient;

import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

/**
 * @author TISNIW
 * @version 1.0
 * @date 2025/6/14 15:49
 * @desc
 */
@Slf4j
@Component
public class ChatUtil {
    private final WebClient webClient;
    @Autowired
    private RedisService redisService;
    @Autowired
    private ChatMessagesMapper messagesMapper;

    public ChatUtil() {
        this.webClient = WebClient.builder()
                .baseUrl(APIConstant.OLLAMA_MODEL_LIST_URL)
                .build();
    }

    @Async
    public void chatStream(String model, String prompt, List<Messages> messages, String sessionId, boolean think, Runnable callback) {
        try {
            log.info("开始流式聊天: model={}, sessionId={}", model, sessionId);

            // 复制消息列表，避免修改原始列表
            List<Messages> msg = new ArrayList<>();
            Messages userMsg = new Messages();
            userMsg.setRole("user");
            String userPrompt = ChatConstants.PROMPT + "```" + prompt + "```" ;
            userMsg.setContent(userPrompt);
            msg.add(userMsg);

            Messages systemMsg = new Messages().builder()
                    .role("system")
                    .content(ChatConstants.PROMPT).build();
            msg.add(systemMsg);
            log.info(msg.toString());
            ChatParameters chatParameters = ChatParameters.builder()
                    .model(model)
                    .messages(msg)
                    .think(think)
                    .stream(true)
                    .build();

            // 用于拼接 AI 返回的完整回复
            AtomicReference<String> aiReplyContent = new AtomicReference<>("");
            AtomicReference<String> aiReplyThinking = new AtomicReference<>("");

            webClient.post()
                    .uri("/api/chat")
                    .bodyValue(chatParameters)
                    .retrieve()
                    .bodyToFlux(String.class)
                    .map(json -> {
                        try {
                            return JsonUtils.parseObject(json, MReply.class);
                        } catch (Exception e) {
                            log.error("解析LLM返回JSON失败: {}", json, e);
                            return null;
                        }
                    })
                    .filter(Objects::nonNull)
                    .doOnNext(mReply -> {
                        Map<String, Object> m = mReply.getMessage();
                        aiReplyContent.updateAndGet(s -> s + String.valueOf(m.get("content") != null ? m.get("content") : ""));
                        aiReplyThinking.updateAndGet(s -> s + String.valueOf(m.get("thinking") != null ? m.get("thinking") : ""));

                        Emitters.sendMessage(sessionId, JsonUtils.writeValueAsString(mReply));
                    })
                    .doOnError(error -> {
                        log.error("调用LLM模型出错", error);
                        Emitters.sendMessage(sessionId, "[ERROR] " + error.getMessage());
                        Emitters.removeEmitter(sessionId);
                    })
                    .doOnComplete(() -> {
                        // 流结束，保存 AI 回复到数据库
                        String finalReply = aiReplyContent.get();
                        String finalThinking = aiReplyThinking.get();
                        Messages m = new Messages();
                        if (finalReply != null && !finalReply.isEmpty()) {
                            messagesMapper.insertMessageAI(sessionId, "assistant", finalReply, finalThinking);
                            m.setRole("assistant");
                            m.setContent(finalReply);
                            m.setThinking(finalThinking);
                        }
                        chatParameters.getMessages().add(m);
                        redisService.setCacheObject(CacheConstants.AI_CHAT_SESSION_KEY + sessionId, chatParameters, TimeEnum.THIRTY_MINUTE);
                        Emitters.removeEmitter(sessionId);

                        // 在响应完成后调用回调函数（异步生成标题）
                        if (callback != null) {
                            callback.run();
                        }
                    })
                    .subscribe();
        } catch (Exception e) {
            log.error("流式聊天失败: {}", e.getMessage());
        }
    }

    /**
     * 生成会话标题
     *
     * @param model  模型名称
     * @param prompt 提示文本
     * @return 生成的标题
     */
    public String generateTitle(String model, String prompt) {
        try {
            String titlePrompt = "请基于以下内容生成一个20字以内的简洁中文标题：" + prompt;
            log.info("开始生成会话标题: model={}, prompt={}", model, titlePrompt);
            String result = callLLM("qwen3:4b", titlePrompt);
            return result;
        } catch (Exception e) {
            log.error("生成会话标题失败: {}", e.getMessage());
            return null;
        }
    }

    /**
     * 非流式调用LLM模型
     *
     * @param model  模型名称
     * @param prompt 提示文本
     * @return LLM返回的内容
     */
    private String callLLM(String model, String prompt) {
        try {
            // 构建消息列表
            List<Messages> messages = new ArrayList<>();
            Messages userMessage = new Messages();
            userMessage.setRole("user");
            userMessage.setContent(prompt);
            messages.add(userMessage);

            ChatParameters chatParameters = ChatParameters.builder()
                    .model(model)
                    .messages(messages)
                    .think(false)
                    .stream(false) // 非流式调用
                    .build();

            log.info("开始非流式调用LLM: model={}", model);

            // 创建CompletableFuture来处理异步调用
            CompletableFuture<String> future = new CompletableFuture<>();

            webClient.post()
                    .uri("/api/chat")
                    .bodyValue(chatParameters)
                    .retrieve()
                    .bodyToMono(String.class)
                    .map(json -> {
                        try {
                            MReply mReply = JsonUtils.parseObject(json, MReply.class);
                            if (mReply != null) {
                                Map<String, Object> message = mReply.getMessage();
                                if (message != null && message.containsKey("content")) {
                                    return String.valueOf(message.get("content"));
                                }
                            }
                            return "";
                        } catch (Exception e) {
                            log.error("解析LLM返回JSON失败: {}", json, e);
                            return "";
                        }
                    })
                    .subscribe(
                            content -> future.complete(content),
                            error -> {
                                log.error("非流式调用LLM模型失败: model={}, prompt={}", model, prompt, error);
                                future.completeExceptionally(error);
                            }
                    );

            // 等待结果，设置超时时间
            String result = future.get();
            log.info("LLM非流式调用成功，返回内容长度: {}", result != null ? result.length() : 0);
            return result;

        } catch (Exception e) {
            log.error("非流式调用LLM模型失败: model={}, prompt={}", model, prompt, e);
            throw new RuntimeException("LLM调用失败: " + e.getMessage(), e);
        }
    }

    // 保留原有的无回调方法
    public void chatStream(String model, String prompt, List<Messages> messages, String sessionId, boolean think) {
        chatStream(model, prompt, messages, sessionId, think, null);
    }
}
