package club.babyf.nativeai.service.impl;

import club.babyf.nativeai.service.IChatService;
import club.babyf.nativeai.config.NativeAiProperties;
import club.babyf.nativeai.constants.MsgConstant;
import club.babyf.nativeai.entity.CustomHashMap;
import club.babyf.nativeai.service.IWechatCallbackService;
import club.babyf.nativeai.utils.AsyncBaseQueue;
import club.babyf.nativeai.utils.DataCounter;
import club.babyf.nativeai.utils.JsonCustomUtil;
import cn.hutool.core.util.StrUtil;
import com.fasterxml.jackson.core.type.TypeReference;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import me.chanjar.weixin.mp.api.WxMpService;
import me.chanjar.weixin.mp.bean.kefu.WxMpKefuMessage;
import me.chanjar.weixin.mp.bean.message.WxMpXmlMessage;
import me.chanjar.weixin.mp.bean.message.WxMpXmlOutMessage;
import me.chanjar.weixin.mp.bean.message.WxMpXmlOutTextMessage;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

/**
 * WechatCallbackServiceImpl.
 *
 * @author zhanglei.
 * @date 2025/2/19 14:23.
 * @description WeChat official-account callback service implementation.
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class WechatCallbackServiceImpl implements IWechatCallbackService {

    /**
     * Maximum number of entries kept in the answer cache.
     */
    private static final int CACHE_SIZE = 100;

    /**
     * Caffeine-backed LRU cache mapping question text to the model's answer.
     * Entries expire one hour after write so stale answers eventually refresh.
     */
    private static final Cache<String, String> CHAT_GPT_CACHE = Caffeine.newBuilder()
            .maximumSize(CACHE_SIZE)
            .expireAfterWrite(1, TimeUnit.HOURS)
            .build();

    private final IChatService chatService;

    private final NativeAiProperties nativeAiProperties;

    private final WxMpService wxMpService;

    /**
     * Synchronous message handling: blocks until the model answers (or fails)
     * and returns the reply inside the callback response.
     *
     * @param inMessage incoming WeChat XML message; must not be null
     * @return text reply addressed back to the sender
     * @throws RuntimeException if {@code inMessage} is null
     */
    @Override
    public WxMpXmlOutMessage syncHandleMessage(WxMpXmlMessage inMessage) {
        // Validate input.
        if (inMessage == null) {
            log.error("sync message handle param is null");
            throw new RuntimeException("sync message handle param is null");
        }
        // Build the reply envelope (from/to are swapped relative to the inbound message).
        WxMpXmlOutTextMessage outMessage = new WxMpXmlOutTextMessage();
        outMessage.setToUserName(inMessage.getFromUser());
        outMessage.setFromUserName(inMessage.getToUser());
        outMessage.setCreateTime(System.currentTimeMillis() / 1000L);
        // Resolve the reply content.
        String content;
        try {
            // Ask the chat backend (cache-first).
            content = chat(inMessage.getContent());
        } catch (Exception e) {
            log.error("sync chatgpt send and reply error，reason：{}", ExceptionUtils.getFullStackTrace(e));
            // Fall back to the "please retry" hint so the user still gets a reply.
            content = MsgConstant.PROCESSING_AGAIN_HINT;
        }
        outMessage.setContent(content);
        return outMessage;
    }

    /**
     * Asynchronous message handling: answers via the customer-service push API
     * (requires a verified official account) while the callback itself returns
     * an immediate "processing" placeholder.
     *
     * @param inMessage incoming WeChat XML message
     * @return immediate placeholder reply, or null when {@code inMessage} is null
     */
    @Override
    public WxMpXmlOutMessage asyncHandleMessage(WxMpXmlMessage inMessage) {
        // Validate up front instead of inside the task, so we don't submit
        // work that can only fail. waitHandleMessage(null) returns null,
        // matching the previous observable behavior for callers.
        if (inMessage == null) {
            log.error("async message handle param is null");
            return waitHandleMessage(inMessage);
        }
        // Run the chat call off-thread and push the answer via the kefu API.
        CompletableFuture.supplyAsync(() -> {
            try {
                // Ask the chat backend (cache-first).
                String content = chat(inMessage.getContent());
                // Push the answer to the sender through the customer-service channel.
                wxMpService.getKefuService().sendKefuMessage(WxMpKefuMessage.TEXT()
                        .toUser(inMessage.getFromUser())
                        .content(content)
                        .build());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            return "end of execution !!!";
        }, AsyncBaseQueue.SENDER_ASYNC).exceptionally(e -> {
            CompletionException completionException = new CompletionException(e);
            log.error("async chatgpt send and reply error，reason：{}",
                    ExceptionUtils.getFullStackTrace(completionException));
            throw completionException;
        });
        // Return the immediate placeholder while the async reply is in flight.
        return waitHandleMessage(inMessage);
    }

    /**
     * Builds the immediate placeholder reply used together with the async
     * customer-service push (returns synchronously, right away).
     *
     * @param inMessage incoming message; null yields a null reply
     * @return placeholder text reply, or null
     */
    private WxMpXmlOutMessage waitHandleMessage(WxMpXmlMessage inMessage) {
        if (inMessage == null) {
            return null;
        }
        // Placeholder content shown while the real answer is being produced.
        String content = MsgConstant.PROCESSING_HINT;
        WxMpXmlOutTextMessage outMessage = new WxMpXmlOutTextMessage();
        outMessage.setToUserName(inMessage.getFromUser());
        outMessage.setFromUserName(inMessage.getToUser());
        outMessage.setCreateTime(System.currentTimeMillis() / 1000L);
        outMessage.setContent(content);
        return outMessage;
    }

    /**
     * Answers a question: cache first, then the model, then a fallback hint.
     *
     * @param questions question text
     * @return answer text (never blank)
     */
    private String chat(String questions) {
        // Blank question: nudge the user instead of calling the model.
        if (StringUtils.isBlank(questions)) {
            return MsgConstant.FOCUS_ON_HINT;
        }
        // Try the cache (also implements retry coordination, see doChatFromCache).
        log.info("questionsFromCache: {}", questions);
        String answersFromCache = doChatFromCache(questions);
        log.info("questionsFromCache: {}, answersFromCache: {}", questions, answersFromCache);
        if (StringUtils.isNotBlank(answersFromCache)) {
            return answersFromCache;
        }
        // Cache miss: ask the model and cache a non-blank answer.
        log.info("questionsFromChat: {}", questions);
        String answersFromChat = doChat(questions);
        log.info("questionsFromChat: {}, answersFromChat: {}", questions, answersFromChat);
        if (StringUtils.isNotBlank(answersFromChat)) {
            CHAT_GPT_CACHE.put(questions, answersFromChat);
            return answersFromChat;
        }
        // Fallback hint when neither cache nor model produced an answer.
        return MsgConstant.PROCESSING_AGAIN_HINT;
    }

    /**
     * Cache lookup with retry coordination. WeChat redelivers a callback up to
     * two more times on timeout; a per-question counter decides how each
     * delivery behaves so the eventual answer lands on one of the retries
     * instead of erroring out.
     *
     * @param questions question text (cache key)
     * @return cached answer, or null on the first delivery (falls through to the model)
     */
    private String doChatFromCache(String questions) {
        // Fast path: answer already cached.
        String answersFromCache = CHAT_GPT_CACHE.getIfPresent(questions);
        if (StringUtils.isNotBlank(answersFromCache)) {
            return answersFromCache;
        }
        // Count deliveries of the same question.
        int increment = DataCounter.increment(questions);
        log.info("requestParam: {}, requestCount: {}", questions, increment);
        // Delivery #1: report a miss so the caller queries the model.
        if (increment == 1) {
            return null;
        }
        // Delivery #2: poll the cache for up to 60s. If delivery #1's answer
        // arrives within WeChat's 5s window we reply directly; otherwise this
        // delivery deliberately times out so WeChat issues delivery #3.
        if (increment == 2) {
            return checkACacheForQ(questions, 60L, TimeUnit.SECONDS);
        }
        // Delivery #3 (last retry): poll for just under the 5s window so we
        // can still reply in time with either the answer or the retry hint.
        return checkACacheForQ(questions, 4500L, TimeUnit.MILLISECONDS);
    }

    /**
     * Polls the cache for an answer to {@code questions} every 500ms until it
     * appears or the timeout elapses.
     *
     * @param questions question text (cache key)
     * @param timeout   how long to keep polling
     * @param timeUnit  unit of {@code timeout}
     * @return cached answer, or {@link MsgConstant#PROCESSING_AGAIN_HINT} on timeout/interruption
     */
    private String checkACacheForQ(String questions, long timeout, TimeUnit timeUnit) {
        // One-shot scheduler per call; torn down in the finally block.
        // NOTE(review): a shared scheduler would avoid per-request thread
        // creation, but would change shutdown semantics — left as-is.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        // Completed by whichever fires first: the poll task or the timeout task.
        CompletableFuture<String> resultFuture = new CompletableFuture<>();
        // Poll task: complete the future as soon as the answer shows up.
        Runnable checkTask = () -> {
            String answers = CHAT_GPT_CACHE.getIfPresent(questions);
            if (answers != null) {
                resultFuture.complete(answers);
            }
        };
        // Poll immediately, then every 500ms.
        ScheduledFuture<?> scheduledFuture = scheduler.scheduleAtFixedRate(checkTask, 0, 500, TimeUnit.MILLISECONDS);
        // Timeout task: give up with the retry hint and stop polling.
        scheduler.schedule(() -> {
            if (!resultFuture.isDone()) {
                resultFuture.complete(MsgConstant.PROCESSING_AGAIN_HINT);
                scheduledFuture.cancel(true);
            }
        }, timeout, timeUnit);
        // Block for the outcome; the timeout task guarantees completion.
        try {
            return resultFuture.get();
        } catch (InterruptedException e) {
            // Restore the interrupt flag — swallowing it hides the interruption
            // from callers and thread pools.
            Thread.currentThread().interrupt();
            log.error("checkACacheForQ error: {}", ExceptionUtils.getFullStackTrace(e));
            return MsgConstant.PROCESSING_AGAIN_HINT;
        } catch (ExecutionException e) {
            log.error("checkACacheForQ error: {}", ExceptionUtils.getFullStackTrace(e));
            return MsgConstant.PROCESSING_AGAIN_HINT;
        } finally {
            // shutdownNow (not shutdown) cancels the still-scheduled poll task
            // instead of letting it linger until its next tick.
            scheduler.shutdownNow();
        }
    }

    /**
     * Queries the chat model and joins all choice contents into one string.
     *
     * @param questions question text
     * @return joined answer text, or null when the backend returned nothing
     */
    private String doChat(String questions) {
        String body = getBody(questions);
        String authorization = "Bearer " + nativeAiProperties.getOpenai().getApiKey();
        Object chat = chatService.chat(authorization, false, body);
        if (chat != null) {
            // Response shape: { "choices": [ { "message": { "content": ... } } ] }
            CustomHashMap<String, Object> chatMap = JsonCustomUtil.parse(chat.toString(), CustomHashMap.class);
            List<CustomHashMap<String, Object>> choices = chatMap.getList("choices", new TypeReference<>() {
            });
            List<String> contentList = new ArrayList<>();
            for (CustomHashMap<String, Object> choice : choices) {
                CustomHashMap<String, Object> messageResult = choice.getObject("message", CustomHashMap.class);
                String answers = messageResult.getString("content");
                contentList.add(answers);
            }
            return StrUtil.join("^^^^^^", contentList);
        }
        return null;
    }

    /**
     * Builds the chat-completion request body for the given question.
     *
     * @param questions question text (JSON-escaped before interpolation)
     * @return JSON request body
     */
    private String getBody(String questions) {
        String bodyTemplate = """
                {
                    "model": "gpt-4o",
                    "messages": [
                        {
                            "role": "user",
                            "content": "%s"
                        }
                    ],
                    "temperature": 0.7,
                    "seed": null,
                    "stream": false,
                    "response_format": {
                        "type": "text"
                    },
                    "top_p": 1,
                    "n": 1,
                    "max_tokens": 1000,
                    "presence_penalty": 0,
                    "frequency_penalty": 0,
                    "logit_bias": {}
                }
                """;
        // Escape the user input: a question containing quotes, backslashes or
        // newlines would otherwise break the JSON body (and could inject fields).
        return String.format(bodyTemplate, escapeJson(questions));
    }

    /**
     * Escapes a string for embedding inside a JSON string literal (RFC 8259):
     * quote, backslash, the named control escapes, and \\u-escapes for the
     * remaining characters below U+0020.
     *
     * @param text raw text; must not be null
     * @return JSON-safe text
     */
    private static String escapeJson(String text) {
        StringBuilder sb = new StringBuilder(text.length());
        for (int i = 0; i < text.length(); i++) {
            char c = text.charAt(i);
            switch (c) {
                case '"' -> sb.append("\\\"");
                case '\\' -> sb.append("\\\\");
                case '\b' -> sb.append("\\b");
                case '\f' -> sb.append("\\f");
                case '\n' -> sb.append("\\n");
                case '\r' -> sb.append("\\r");
                case '\t' -> sb.append("\\t");
                default -> {
                    if (c < 0x20) {
                        // Other control characters must be \\u-escaped in JSON.
                        sb.append(String.format("\\u%04x", (int) c));
                    } else {
                        sb.append(c);
                    }
                }
            }
        }
        return sb.toString();
    }

}
