package com.cfp4cloud.cfp.knowledge.support.flow.model.processor;

import cn.hutool.core.lang.Dict;
import cn.hutool.core.lang.Pair;
import cn.hutool.extra.template.TemplateConfig;
import cn.hutool.extra.template.TemplateEngine;
import cn.hutool.extra.template.TemplateUtil;
import com.cfp4cloud.cfp.admin.api.feign.RemoteFileService;
import com.cfp4cloud.cfp.common.core.constant.enums.YesNoEnum;
import com.cfp4cloud.cfp.knowledge.dto.AiFlowExecuteDTO;
import com.cfp4cloud.cfp.knowledge.service.AiAssistantService;
import com.cfp4cloud.cfp.knowledge.service.AiStreamAssistantService;
import com.cfp4cloud.cfp.knowledge.support.flow.constants.ExecutionStatusEnums;
import com.cfp4cloud.cfp.knowledge.support.flow.constants.FlowConstant;
import com.cfp4cloud.cfp.knowledge.support.flow.constants.NodeTypeConstants;
import com.cfp4cloud.cfp.knowledge.support.flow.core.FlowContextHolder;
import com.cfp4cloud.cfp.knowledge.support.flow.core.FlowException;
import com.cfp4cloud.cfp.knowledge.support.flow.model.AiNodeDefinition;
import com.cfp4cloud.cfp.knowledge.support.flow.model.nodes.AiLLMNode;
import com.cfp4cloud.cfp.knowledge.support.provider.ChatMemoryAdvisorProvider;
import com.cfp4cloud.cfp.knowledge.support.provider.ModelProvider;
import dev.langchain4j.data.message.*;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import feign.Response;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import static com.cfp4cloud.cfp.knowledge.support.constant.AiChatConstants.END_MSG;

/**
 * LLM node processor. Handles large-language-model nodes inside a flow: renders message
 * templates against flow variables, dispatches to either a vision model or a streaming
 * text model, and formats the model response into a result {@link Dict}.
 */
@Slf4j
@Component(NodeTypeConstants.LLM)
@RequiredArgsConstructor
public class LLMNodeProcessor extends AbstractNodeProcessor {

	/**
	 * Template engine used for variable substitution in message templates.
	 */
	public static final TemplateEngine engine = TemplateUtil.createEngine(new TemplateConfig());

	/** Provides per-conversation chat memory. */
	private final ChatMemoryAdvisorProvider chatMemoryAdvisorProvider;

	/** Provides configured model/assistant instances (vision and streaming). */
	private final ModelProvider modelProvider;

	/** Remote file service used to fetch image bytes for vision models. */
	private final RemoteFileService remoteFileService;

	/**
	 * Executes the LLM node: validates configuration, renders message templates and
	 * delegates to the vision or regular (streaming) model path.
	 * @param node node definition being executed
	 * @param context flow execution context
	 * @return result dictionary produced by {@link #formatResponse(ChatResponse)}
	 * @throws FlowException wrapping any failure, tagged with the node id
	 */
	@Override
	protected Dict doExecute(AiNodeDefinition node, FlowContextHolder context) {
		try {
			// Validate node configuration up front so failures are reported clearly
			AiLLMNode config = validateNodeConfig(node);

			// Resolve input variables and render message templates
			Dict variables = getInputVariables(node, context);
			List<ChatMessage> chatMessages = processChatMessages(config.getMessages(), variables);

			// Dispatch depending on whether the configured model is a vision model
			if (YesNoEnum.YES.getCode().equals(config.getModelConfig().getIsVision())) {
				return processVisionModel(chatMessages, config.getModelConfig(), context);
			}
			else {
				return processRegularModel(chatMessages, config.getModelConfig(), context, node);
			}
		}
		catch (Exception e) {
			throw FlowException.nodeError(node.getId(), "[LLM节点] -> " + e.getMessage());
		}
	}

	/**
	 * Validates the LLM node configuration.
	 * @param node node definition
	 * @return the validated LLM node configuration
	 * @throws FlowException if the node, model or message configuration is missing
	 */
	private AiLLMNode validateNodeConfig(AiNodeDefinition node) {
		// LLM node configuration must be present
		AiLLMNode config = node.getLlmParams();
		if (config == null) {
			throw FlowException.invalidParam("LLM节点配置无效");
		}

		// Model configuration must be present
		AiLLMNode.Model modelConfig = config.getModelConfig();
		if (modelConfig == null) {
			throw FlowException.invalidParam("LLM节点模型配置无效");
		}

		// At least one message must be configured
		List<AiLLMNode.Message> messages = config.getMessages();
		if (messages == null || messages.isEmpty()) {
			throw FlowException.invalidParam("LLM节点消息配置无效");
		}

		return config;
	}

	/**
	 * Renders message templates against the variable dictionary and converts each entry
	 * to a {@link ChatMessage}.
	 * <p>
	 * NOTE(review): only the AI role is mapped to {@link AiMessage}; every other role
	 * (including SYSTEM) currently becomes a {@link UserMessage} — confirm this is
	 * intentional.
	 * @param messages raw message configuration
	 * @param variables variable dictionary used for template substitution
	 * @return mutable list of rendered chat messages
	 */
	private List<ChatMessage> processChatMessages(List<AiLLMNode.Message> messages, Dict variables) {
		return messages.stream().map(msg -> {
			// Render the message content, substituting flow variables
			String renderedContent = engine.getTemplate(msg.getContent()).render(variables);

			// Map the configured role to the corresponding ChatMessage type
			if (msg.getRole().equals(ChatMessageType.AI.name())) {
				return AiMessage.aiMessage(renderedContent);
			}
			return UserMessage.from(renderedContent);
		}).collect(Collectors.toCollection(ArrayList::new));
	}

	/**
	 * Handles a vision-model request: fetches the referenced image via the remote file
	 * service, attaches it as a Base64-encoded image message and invokes the vision
	 * model synchronously.
	 * @param chatMessages rendered chat messages
	 * @param modelConfig vision model configuration (its picUrl names the context
	 * parameter holding the image reference)
	 * @param context flow execution context
	 * @return formatted result dictionary
	 * @throws IOException if reading the remote file stream fails
	 */
	private Dict processVisionModel(List<ChatMessage> chatMessages, AiLLMNode.Model modelConfig,
			FlowContextHolder context) throws IOException {
		// Obtain the vision assistant (the key is the synchronous chat model)
		Pair<ChatModel, AiAssistantService> aiVisionAssistant = modelProvider.getAiVisionAssistant();

		Object parameter = context.getParameter(modelConfig.getPicUrl());
		if (parameter == null) {
			// Fail fast instead of NPE-ing on parameter.toString() below
			throw FlowException.invalidParam("LLM节点配置无效");
		}

		// Fetch the image and Base64-encode it; close the feign Response and its body
		// stream to avoid leaking the underlying connection
		String encodeToString;
		try (Response fileResponse = remoteFileService.getFile(parameter.toString());
				InputStream body = fileResponse.body().asInputStream()) {
			encodeToString = Base64.getEncoder().encodeToString(body.readAllBytes());
		}

		// Append the image as a user message (assumes PNG content — TODO confirm the
		// remote file is always PNG)
		ArrayList<ChatMessage> messages = new ArrayList<>(chatMessages);
		messages.add(UserMessage.from(ImageContent.from(encodeToString, MediaType.IMAGE_PNG_VALUE)));

		// Invoke the model synchronously and format the response
		ChatResponse chatResponse = aiVisionAssistant.getKey().chat(messages);
		return formatResponse(chatResponse);
	}

	/**
	 * Handles a regular (streaming) text-model request. Partial responses are pushed to
	 * the flow callback as they arrive; completion appends the AI message to chat
	 * memory, records node execution stats and emits the end marker.
	 * @param chatMessages rendered chat messages
	 * @param modelConfig model configuration
	 * @param context flow execution context
	 * @param node node definition, updated with status/duration/token stats on success
	 * @return formatted result dictionary, or an empty-content dictionary on
	 * timeout/interrupt/error
	 */
	private Dict processRegularModel(List<ChatMessage> chatMessages, AiLLMNode.Model modelConfig,
			FlowContextHolder context, AiNodeDefinition node) {

		// NOTE(review): getPicUrl() is passed as the stream-assistant selector here —
		// looks like a model identifier is expected; confirm this field is correct
		Pair<StreamingChatModel, AiStreamAssistantService> assistantServicePair = modelProvider
			.getAiStreamAssistant(modelConfig.getPicUrl());

		// Load conversation memory and append the rendered messages
		ChatMemory chatMemory = chatMemoryAdvisorProvider.get(context.getConversationId());
		chatMemory.add(chatMessages);

		// Bridge the asynchronous streaming callback to this synchronous method
		CountDownLatch latch = new CountDownLatch(1);
		final ChatResponse[] finalResponse = new ChatResponse[1];

		assistantServicePair.getKey().chat(chatMemory.messages(), new StreamingChatResponseHandler() {
			@Override
			public void onPartialResponse(String msg) {
				// Forward each partial chunk to the flow callback
				context.getAiFlowExecuteDTO()
					.getCallback()
					.execute(AiFlowExecuteDTO.FlowCallbackResult.builder()
						.data(AiFlowExecuteDTO.FlowCallbackData.builder().content(msg).build())
						.build());
			}

			@Override
			public void onCompleteResponse(ChatResponse chatResponse) {
				// countDown() lives in finally: if the callback below throws, the
				// waiting thread must still be released instead of blocking 5 minutes
				try {
					finalResponse[0] = chatResponse;

					if (chatResponse != null) {
						chatMemory.add(chatResponse.aiMessage());
					}

					AiFlowExecuteDTO.FlowCallbackData.FlowCallbackDataBuilder callbackDataBuilder = AiFlowExecuteDTO.FlowCallbackData
						.builder()
						.content(END_MSG);

					// Record execution stats only when token usage is reported
					if (chatResponse != null && chatResponse.tokenUsage() != null) {
						callbackDataBuilder.tokens(chatResponse.tokenUsage().totalTokenCount());
						callbackDataBuilder.duration(context.getDuration());
						node.setStatus(ExecutionStatusEnums.SUCCESS.getValue());
						node.setDuration(context.getDuration());
						node.setTokens(chatResponse.tokenUsage().totalTokenCount());
						context.getExecutedNodes().add(node);
						callbackDataBuilder.nodes(context.getExecutedNodes());
					}

					// Emit the end-of-stream marker with accumulated stats
					context.getAiFlowExecuteDTO()
						.getCallback()
						.execute(AiFlowExecuteDTO.FlowCallbackResult.builder()
							.data(callbackDataBuilder.build())
							.build());
				}
				finally {
					latch.countDown();
				}
			}

			@Override
			public void onError(Throwable throwable) {
				log.error("流式聊天发生错误", throwable);
				latch.countDown();
			}
		});

		try {
			// await() returns false on timeout — surface that instead of silently
			// returning an empty result with no diagnostic
			boolean completed = latch.await(5, TimeUnit.MINUTES);
			if (!completed) {
				log.warn("LLM streaming response timed out after 5 minutes, nodeId={}", node.getId());
			}
			return finalResponse[0] != null ? formatResponse(finalResponse[0])
					: Dict.create()
						.set(FlowConstant.CONTENT, "")
						.set(FlowConstant.TIMESTAMP, System.currentTimeMillis());
		}
		catch (InterruptedException e) {
			// Restore the interrupt flag and return an empty result
			Thread.currentThread().interrupt();
			return Dict.create().set(FlowConstant.CONTENT, "").set(FlowConstant.TIMESTAMP, System.currentTimeMillis());
		}
	}

	/**
	 * Formats a model response into the flow result dictionary.
	 * @param chatResponse model response (must be non-null)
	 * @return dictionary with content, role, tokens (null when the provider reports no
	 * usage) and timestamp
	 */
	private Dict formatResponse(ChatResponse chatResponse) {
		// tokenUsage() may be null for some providers (the streaming path already
		// guards for this) — avoid the NPE here as well
		Integer totalTokens = chatResponse.tokenUsage() != null ? chatResponse.tokenUsage().totalTokenCount() : null;
		String content = chatResponse.aiMessage().text();
		String role = chatResponse.aiMessage().type().name();

		// Assemble the result dictionary
		return Dict.create()
			.set(FlowConstant.CONTENT, content)
			.set(FlowConstant.ROLE, role)
			.set(FlowConstant.TOKENS, totalTokens)
			.set(FlowConstant.TIMESTAMP, System.currentTimeMillis());
	}

}
