package com.sys.ai.builder;

import com.gy.ai.agent.core.ReqOptions;
import com.gy.ai.agent.enums.RoleEnums;
import com.gy.ai.agent.platform.dashscope.entity.DashscopePlatformChatReq;
import com.gy.ai.agent.platform.ollama.entity.OllamaChatReq;
import com.gy.ai.agent.platform.siliconflow.entity.SiliconflowChatReq;
import com.gy.ai.agent.platform.siliconflow.entity.SiliconflowGenerateImageReq;
import com.sys.ai.domain.AiApps;
import com.sys.ai.domain.AiModel;
import com.sys.ai.domain.AiPlatform;
import com.sys.ai.domain.AiSendHistory;
import com.sys.ai.enums.LLMModelTypeEnums;
import com.sys.ai.enums.PlatformEnums;
import com.sys.ai.fo.SendMessageFO;
import com.sys.ai.service.IAiSendHistoryService;
import com.sys.ai.vo.KnowledgeFragmentationVO;
import com.sys.ai.vo.MessageVO;
import com.sys.ai.vo.ReqMessage;
import com.sys.common.core.domain.model.LoginUser;
import com.sys.common.utils.EnumUtil;
import org.apache.commons.lang3.StringUtils;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;

import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

/**
 * Builds platform-specific LLM request payloads (Siliconflow, Ollama, Dashscope)
 * from the app configuration, optional preview overrides, recent chat history
 * and knowledge-base fragments.
 *
 * @author LGY
 * Create by 2025/5/15 14:31
 */
public class AIMessageBuilder {

    /**
     * Default system prompt, used when neither the app configuration nor the
     * preview parameters provide a non-blank prompt.
     */
    private static final String DEFAULT_PROMPT = """
            你是一个AI助手，请根据用户的问题，结合知识库内容，给出最合适的答案。
            如果用户问题与知识库内容无关，请直接输出“抱歉，我不太理解您的问题。”
            如果用户问题与知识库内容有关，请根据知识库内容给出最合适的答案。
            """;

    /**
     * Placeholder inside a prompt that is replaced with the knowledge-base content.
     */
    private static final String CONTENT_PLACEHOLDER = "{{content}}";

    /**
     * Utility class — static methods only, not meant to be instantiated.
     */
    private AIMessageBuilder() {
    }

    /**
     * Sampling parameters for a chat request, resolved once per request:
     * preview-mode values take precedence, otherwise the app configuration is
     * used. Every field may be {@code null} when left unset.
     *
     * @param maxTokens   maximum tokens to generate (not applied on the Ollama path)
     * @param temperature sampling temperature
     * @param topP        nucleus-sampling threshold
     * @param topK        top-k sampling cutoff
     */
    private record TuningParams(Integer maxTokens, BigDecimal temperature, BigDecimal topP, Integer topK) {

        /**
         * Resolves the effective parameters from the preview input or the app config.
         */
        static TuningParams resolve(SendMessageFO sendMessageFO, AiApps aiApps) {
            if (isPreview(sendMessageFO)) {
                var previewParam = sendMessageFO.getPreviewParam();
                return new TuningParams(previewParam.getMaxTokens(), previewParam.getTemperature(),
                        previewParam.getTopP(), previewParam.getTopK());
            }
            if (aiApps == null) {
                return new TuningParams(null, null, null, null);
            }
            return new TuningParams(aiApps.getMaxTokens(), aiApps.getTemperature(), aiApps.getTopP(), aiApps.getTopK());
        }
    }

    /**
     * True when this request runs in preview mode (preview parameters then
     * override the app configuration).
     */
    private static boolean isPreview(SendMessageFO sendMessageFO) {
        return sendMessageFO.getHasPreview() != null && sendMessageFO.getHasPreview();
    }

    /**
     * Resolves and validates the model type; fails fast on unknown codes.
     */
    private static LLMModelTypeEnums resolveModelType(AiModel aiModel) {
        LLMModelTypeEnums llmModelTypeEnums = EnumUtil.getEnumByCode(aiModel.getType(), LLMModelTypeEnums.class);
        Assert.notNull(llmModelTypeEnums, "暂不支持的模型类型");
        return llmModelTypeEnums;
    }

    /**
     * Builds the LLM request options for the platform the model belongs to.
     *
     * @param sendHistoryService chat-history lookup service
     * @param messageVO          message carrier (supplies the answer-group code)
     * @param sendMessageFO      incoming send request
     * @param aiApps             app configuration, may be {@code null}
     * @param knowledgeQAVOList  knowledge-base fragments, may be empty
     * @param aiModel            model to call
     * @param aiPlatform         platform hosting the model
     * @param currentUser        current login user (scopes the history lookup)
     * @return the platform-specific request options
     * @throws IllegalArgumentException when the platform or model type is unsupported
     */
    public static ReqOptions buildRequestParam(IAiSendHistoryService sendHistoryService, MessageVO messageVO, SendMessageFO sendMessageFO,
                                               AiApps aiApps, List<KnowledgeFragmentationVO> knowledgeQAVOList, AiModel aiModel,
                                               AiPlatform aiPlatform, LoginUser currentUser) {
        PlatformEnums platformEnums = EnumUtil.getEnumByCode(aiPlatform.getCode(), PlatformEnums.class);
        Assert.notNull(platformEnums, "暂不支持的LLM平台");
        return switch (platformEnums) {
            case SILICON_FLOW ->
                    buildSiliconflowReqParam(aiModel, sendHistoryService, messageVO, sendMessageFO, aiApps, knowledgeQAVOList, currentUser);
            case OLLAMA ->
                    buildOllamaReqParam(aiModel, sendHistoryService, messageVO, sendMessageFO, aiApps, knowledgeQAVOList, currentUser);
            case DASHSCOPE ->
                    buildDashscopeReqParam(aiModel, sendHistoryService, messageVO, sendMessageFO, aiApps, knowledgeQAVOList, currentUser);
            default ->
                    throw new IllegalArgumentException("暂不支持的LLM平台");
        };
    }

    /**
     * Builds the Dashscope chat request; only the CHAT model type is supported.
     *
     * @param aiModel            model to call
     * @param sendHistoryService chat-history lookup service
     * @param messageVO          message carrier (supplies the answer-group code)
     * @param sendMessageFO      incoming send request
     * @param aiApps             app configuration, may be {@code null}
     * @param knowledgeQAVOList  knowledge-base fragments, may be empty
     * @param currentUser        current login user
     * @return the Dashscope request options
     * @throws IllegalArgumentException when the model type is unsupported
     */
    private static ReqOptions buildDashscopeReqParam(AiModel aiModel, IAiSendHistoryService sendHistoryService, MessageVO messageVO,
                                                     SendMessageFO sendMessageFO, AiApps aiApps, List<KnowledgeFragmentationVO> knowledgeQAVOList,
                                                     LoginUser currentUser) {
        return switch (resolveModelType(aiModel)) {
            case CHAT -> {
                List<ReqMessage> messages = buildWaitSendMessage(sendHistoryService, messageVO, sendMessageFO, aiApps,
                        knowledgeQAVOList, currentUser);
                List<DashscopePlatformChatReq.Message> messageList = messages.stream().map(item -> {
                    DashscopePlatformChatReq.Message message = new DashscopePlatformChatReq.Message();
                    message.setRole(item.getRole()).setContent(item.getContent());
                    return message;
                }).toList();
                TuningParams params = TuningParams.resolve(sendMessageFO, aiApps);
                boolean deepThinking = sendMessageFO.isUseDeepThing();
                DashscopePlatformChatReq dashscopePlatformChatReq = new DashscopePlatformChatReq();
                dashscopePlatformChatReq.setStream(true)
                        .setMessages(messageList)
                        .setMax_tokens(params.maxTokens())
                        .setTemperature(params.temperature())
                        .setTop_p(params.topP())
                        .setTop_k(params.topK())
                        .setEnable_thinking(deepThinking);
                if (!deepThinking) {
                    // Keep the thinking budget minimal when deep thinking is off.
                    dashscopePlatformChatReq.setThinking_budget(1);
                }
                yield dashscopePlatformChatReq;
            }
            default -> throw new IllegalArgumentException("暂不支持的模型类型");
        };
    }

    /**
     * Builds the Ollama chat request; only the CHAT model type is supported.
     * <p>
     * NOTE(review): unlike the other platforms, no max-token limit is applied
     * here — the original code computed {@code maxTokens} but never used it.
     * Confirm whether a {@code num_predict}-style limit should be set on the
     * Modelfile options.
     *
     * @param aiModel            model to call
     * @param sendHistoryService chat-history lookup service
     * @param messageVO          message carrier (supplies the answer-group code)
     * @param sendMessageFO      incoming send request
     * @param aiApps             app configuration, may be {@code null}
     * @param knowledgeQAVOList  knowledge-base fragments, may be empty
     * @param currentUser        current login user
     * @return the Ollama request options
     * @throws IllegalArgumentException when the model type is unsupported
     */
    private static ReqOptions buildOllamaReqParam(AiModel aiModel, IAiSendHistoryService sendHistoryService, MessageVO messageVO,
                                                  SendMessageFO sendMessageFO, AiApps aiApps, List<KnowledgeFragmentationVO> knowledgeQAVOList, LoginUser currentUser) {
        return switch (resolveModelType(aiModel)) {
            case CHAT -> {
                List<ReqMessage> messages = buildWaitSendMessage(sendHistoryService, messageVO, sendMessageFO, aiApps,
                        knowledgeQAVOList, currentUser);
                List<OllamaChatReq.Message> messageList = messages.stream().map(item -> {
                    OllamaChatReq.Message message = new OllamaChatReq.Message();
                    message.setRole(item.getRole()).setContent(item.getContent());
                    return message;
                }).toList();
                TuningParams params = TuningParams.resolve(sendMessageFO, aiApps);
                OllamaChatReq ollamaChatReq = new OllamaChatReq();
                ollamaChatReq.setMessages(messageList)
                        .setOptions(new OllamaChatReq.Modelfile()
                                .setTemperature(params.temperature())
                                .setTop_p(params.topP())
                                .setTop_k(params.topK()));
                yield ollamaChatReq;
            }
            default -> throw new IllegalArgumentException("暂不支持的模型类型");
        };
    }

    /**
     * Builds the Siliconflow request; supports CHAT and IMAGE model types.
     *
     * @param aiModel            model to call
     * @param sendHistoryService chat-history lookup service
     * @param messageVO          message carrier (supplies the answer-group code)
     * @param sendMessageFO      incoming send request
     * @param aiApps             app configuration, may be {@code null}
     * @param knowledgeQAVOList  knowledge-base fragments, may be empty
     * @param currentUser        current login user
     * @return the Siliconflow request options
     * @throws IllegalArgumentException when the model type is unsupported
     */
    public static ReqOptions buildSiliconflowReqParam(AiModel aiModel, IAiSendHistoryService sendHistoryService, MessageVO messageVO,
                                                      SendMessageFO sendMessageFO, AiApps aiApps, List<KnowledgeFragmentationVO> knowledgeQAVOList,
                                                      LoginUser currentUser) {
        return switch (resolveModelType(aiModel)) {
            case CHAT -> {
                List<ReqMessage> messages = buildWaitSendMessage(sendHistoryService, messageVO, sendMessageFO, aiApps,
                        knowledgeQAVOList, currentUser);
                List<SiliconflowChatReq.Message> messageList = messages.stream().map(item -> {
                    SiliconflowChatReq.Message message = new SiliconflowChatReq.Message();
                    message.setRole(item.getRole()).setContent(item.getContent());
                    return message;
                }).toList();
                TuningParams params = TuningParams.resolve(sendMessageFO, aiApps);
                SiliconflowChatReq siliconflowChatReq = new SiliconflowChatReq();
                siliconflowChatReq.setStream(true)
                        .setMessages(messageList)
                        .setMax_tokens(params.maxTokens())
                        .setTemperature(params.temperature())
                        .setTop_p(params.topP())
                        .setTop_k(params.topK());
                if (!sendMessageFO.isUseDeepThing()) {
                    // Keep the thinking budget minimal when deep thinking is off.
                    siliconflowChatReq.setThinking_budget(1);
                }
                yield siliconflowChatReq;
            }
            case IMAGE -> {
                // Text-to-image: fixed generation settings, the user message is the prompt.
                SiliconflowGenerateImageReq siliconflowGenerateImageReq = new SiliconflowGenerateImageReq();
                siliconflowGenerateImageReq.setPrompt(sendMessageFO.getMessage())
                        .setImage_size("1024x1024")
                        .setBatch_size(4)
                        .setNum_inference_steps(20)
                        .setGuidance_scale(BigDecimal.valueOf(7.5));
                yield siliconflowGenerateImageReq;
            }
            default -> throw new IllegalArgumentException("暂不支持的模型类型");
        };
    }

    /**
     * Resolves the system prompt for this request. Preview parameters override
     * the app prompt; a blank or missing prompt falls back to
     * {@link #DEFAULT_PROMPT} (previously a null prompt could reach
     * {@code prompt.contains(...)} below and throw an NPE). When LLM
     * re-rendering is enabled, the knowledge-base content is injected at the
     * {@code {{content}}} placeholder or appended to the prompt.
     *
     * @param sendMessageFO     incoming send request
     * @param aiApps            app configuration, may be {@code null}
     * @param knowledgeQAVOList knowledge-base fragments, may be empty
     * @return the resolved prompt, never {@code null}
     */
    private static String getPrompt(SendMessageFO sendMessageFO, AiApps aiApps, List<KnowledgeFragmentationVO> knowledgeQAVOList) {
        boolean preview = isPreview(sendMessageFO);
        String prompt = preview ? sendMessageFO.getPreviewParam().getPrompts()
                : (aiApps != null ? aiApps.getPrompt() : null);
        if (StringUtils.isBlank(prompt)) {
            prompt = DEFAULT_PROMPT;
        }
        // Whether the LLM should re-render the knowledge content into the answer.
        boolean hasLlmRender = preview
                ? Objects.equals("Y", sendMessageFO.getPreviewParam().getHasLlmRender())
                : aiApps != null && Objects.equals("Y", aiApps.getHasLlmRender());
        if (!hasLlmRender) {
            // No re-rendering requested: the prompt is used as-is.
            return prompt;
        }
        String knowledgeContent = "未从知识库查询到相关内容，请换个问题咨询";
        if (!CollectionUtils.isEmpty(knowledgeQAVOList)) {
            List<String> fragmentationList = knowledgeQAVOList.stream()
                    .map(KnowledgeFragmentationVO::getFragmentation)
                    .toList();
            knowledgeContent = StringUtils.join(fragmentationList, "\n");
        }
        if (prompt.contains(CONTENT_PLACEHOLDER)) {
            return prompt.replace(CONTENT_PLACEHOLDER, knowledgeContent);
        }
        return prompt + "\n 知识库查询内容数据如下：\n" + knowledgeContent;
    }

    /**
     * Builds the ordered message list to send: the system prompt, up to the
     * last five question/answer pairs from history, then the user's question.
     *
     * @param sendHistoryService chat-history lookup service
     * @param messageVO          message carrier (supplies the answer-group code)
     * @param sendMessageFO      incoming send request
     * @param aiApps             app configuration, may be {@code null}
     * @param knowledgeQAVOList  knowledge-base fragments, may be empty
     * @param currentUser        current login user (scopes the history lookup)
     * @return the messages in conversation order
     */
    public static List<ReqMessage> buildWaitSendMessage(IAiSendHistoryService sendHistoryService, MessageVO messageVO, SendMessageFO sendMessageFO,
                                                        AiApps aiApps, List<KnowledgeFragmentationVO> knowledgeQAVOList, LoginUser currentUser) {
        // 1 system + up to 5 history pairs + 1 user question fits without resizing.
        List<ReqMessage> messageList = new ArrayList<>(15);
        String prompt = getPrompt(sendMessageFO, aiApps, knowledgeQAVOList);
        buildNormalMessage(sendHistoryService, messageVO, sendMessageFO, prompt, messageList, currentUser);
        return messageList;
    }

    /**
     * Appends the system prompt, the recent history and the user's question to
     * {@code messageList} in conversation order.
     *
     * @param sendHistoryService chat-history lookup service
     * @param messageVO          carries the answer-group code used to fetch history
     * @param sendMessageFO      incoming send request (supplies the user question)
     * @param prompt             resolved system prompt
     * @param messageList        output list the messages are appended to
     * @param currentUser        current login user (scopes the history lookup)
     */
    private static void buildNormalMessage(IAiSendHistoryService sendHistoryService, MessageVO messageVO, SendMessageFO sendMessageFO,
                                           String prompt, List<ReqMessage> messageList, LoginUser currentUser) {
        // The system prompt always goes first.
        messageList.add(new ReqMessage(RoleEnums.SYSTEM.getValue(), prompt));
        // Last five Q&A pairs for this group, only when a group code is present.
        if (StringUtils.isNotBlank(messageVO.getRequestAnswerGroupCode())) {
            List<AiSendHistory> sendHistoryList =
                    sendHistoryService.findLatestFiveByGroupCode(messageVO.getRequestAnswerGroupCode(), currentUser);
            if (!CollectionUtils.isEmpty(sendHistoryList)) {
                for (AiSendHistory sendHistory : sendHistoryList) {
                    messageList.add(new ReqMessage(RoleEnums.USER.getValue(), sendHistory.getQuestion()));
                    messageList.add(new ReqMessage(RoleEnums.ASSISTANT.getValue(), sendHistory.getContent()));
                }
            }
        }
        // The new user question comes last.
        messageList.add(new ReqMessage(RoleEnums.USER.getValue(), sendMessageFO.getMessage()));
    }
}
