package com.boulderai.mcp.service;

import com.boulderai.mcp.llm.LlmClient;
import com.boulderai.mcp.llm.LlmClientFactory;
import com.boulderai.mcp.model.Tool;
import com.boulderai.mcp.model.llm.*;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

/**
 * Integration service between large language models (LLMs) and MCP servers.
 *
 * <p>Coordinates the interaction between an LLM and an MCP session:
 * <ul>
 *   <li>exposes the MCP session's tools to the model in function-calling format;</li>
 *   <li>executes tool calls requested by the model against the MCP server and feeds
 *       the results back into the conversation until the model stops requesting tools
 *       (bounded by {@link #MAX_TOOL_CALL_ROUNDS}).</li>
 * </ul>
 */
@Service
public class LlmMcpService {
    
    private static final Logger logger = LoggerFactory.getLogger(LlmMcpService.class);
    
    /**
     * Upper bound on consecutive model→tool→model rounds in a single chat, so a model
     * that keeps requesting tools cannot loop forever. Round 1 matches the previous
     * single-round behavior; additional rounds handle follow-up tool calls that were
     * previously returned to the caller unexecuted.
     */
    private static final int MAX_TOOL_CALL_ROUNDS = 5;
    
    @Autowired
    private McpService mcpService;
    
    @Autowired
    private LlmClientFactory llmClientFactory;
    
    // Jackson ObjectMapper is thread-safe after configuration; shared across requests.
    private final ObjectMapper objectMapper = new ObjectMapper();
    
    /**
     * Chats with the LLM, automatically executing any MCP tool calls it requests.
     *
     * @param sessionId    MCP session id whose tools are offered to the model
     * @param clientType   LLM client type understood by {@code LlmClientFactory}
     * @param model        model name
     * @param messages     conversation history sent to the model
     * @param clientConfig optional per-request client configuration (may be null/empty,
     *                     in which case the factory's default client is used)
     * @return the final completion after all tool-call rounds have been resolved
     */
    public Mono<LlmResponse> chatWithMcp(String sessionId, String clientType, String model, 
                                        List<LlmMessage> messages, Map<String, String> clientConfig) {
        
        logger.info("Starting chat with MCP integration: sessionId={}, clientType={}, model={}", 
                sessionId, clientType, model);
        
        try {
            LlmClient llmClient = createLlmClient(clientType, clientConfig);
            
            // Fetch the session's MCP tools (converted to LLM tool format), then run
            // the request through the tool-call resolution loop.
            return getMcpToolsAsLlmTools(sessionId)
                    .flatMap(tools -> {
                        LlmRequest request = new LlmRequest(model, messages);
                        request.setTools(tools);
                        request.setToolChoice("auto"); // let the model decide whether to use tools
                        
                        return processLlmResponseWithMcp(sessionId, llmClient, request, messages);
                    });
                    
        } catch (Exception e) {
            // Client construction is synchronous; surface failures through the Mono.
            logger.error("Error in chat with MCP integration", e);
            return Mono.error(e);
        }
    }
    
    /**
     * Streaming variant of {@link #chatWithMcp}.
     *
     * <p>NOTE(review): the stream is currently buffered via {@code collectList()} and
     * tool calls inside streamed responses are not executed (see
     * {@link #processStreamResponseWithMcp}); kept as-is pending a proper streaming
     * tool-call implementation.
     */
    public Flux<LlmResponse> chatStreamWithMcp(String sessionId, String clientType, String model, 
                                              List<LlmMessage> messages, Map<String, String> clientConfig) {
        
        logger.info("Starting streaming chat with MCP integration: sessionId={}, clientType={}, model={}", 
                sessionId, clientType, model);
        
        try {
            LlmClient llmClient = createLlmClient(clientType, clientConfig);
            
            return getMcpToolsAsLlmTools(sessionId)
                    .flatMapMany(tools -> {
                        LlmRequest request = new LlmRequest(model, messages);
                        request.setTools(tools);
                        request.setToolChoice("auto");
                        
                        return llmClient.chatCompletionStream(request)
                                .collectList()
                                .flatMapMany(responses -> 
                                        processStreamResponseWithMcp(sessionId, llmClient, responses, messages));
                    });
                    
        } catch (Exception e) {
            logger.error("Error in streaming chat with MCP integration", e);
            return Flux.error(e);
        }
    }
    
    /**
     * Lists the MCP session's tools and converts them to the LLM function-calling
     * format. Failures are logged and degrade to an empty tool list so the chat can
     * proceed without tools instead of failing outright.
     */
    private Mono<List<LlmTool>> getMcpToolsAsLlmTools(String sessionId) {
        // Supplier form defers the future until subscription (the non-supplier overload
        // would kick off the MCP call eagerly at assembly time).
        return Mono.fromFuture(() -> mcpService.listTools(sessionId))
                .map(this::convertMcpToolsToLlmTools)
                .onErrorResume(e -> {
                    logger.warn("Failed to list MCP tools for session {}; continuing without tools", 
                            sessionId, e);
                    return Mono.just(new ArrayList<>());
                });
    }
    
    /**
     * Converts a list of MCP tools to LLM tool definitions.
     */
    private List<LlmTool> convertMcpToolsToLlmTools(List<Tool> mcpTools) {
        return mcpTools.stream()
                .map(this::convertMcpToolToLlmTool)
                .collect(Collectors.toList());
    }
    
    /**
     * Converts a single MCP tool to an LLM function tool.
     *
     * <p>Builds a JSON-schema parameters object of {@code type: object}, carrying over
     * the MCP input schema's {@code properties} and (when present) its {@code required}
     * list. Falls back to a minimal schema if conversion fails.
     */
    private LlmTool convertMcpToolToLlmTool(Tool mcpTool) {
        try {
            ObjectNode parametersSchema = objectMapper.createObjectNode();
            parametersSchema.put("type", "object");
            
            JsonNode inputSchema = mcpTool.getInputSchema();
            if (inputSchema != null && inputSchema.has("properties")) {
                parametersSchema.set("properties", inputSchema.get("properties"));
            } else {
                parametersSchema.set("properties", objectMapper.createObjectNode());
            }
            // Preserve the "required" list; dropping it would make every parameter optional.
            if (inputSchema != null && inputSchema.has("required")) {
                parametersSchema.set("required", inputSchema.get("required"));
            }
            
            return LlmTool.createFunction(
                    mcpTool.getName(),
                    mcpTool.getDescription(),
                    parametersSchema
            );
        } catch (Exception e) {
            logger.warn("Failed to convert MCP tool to LLM tool: {}", mcpTool.getName(), e);
            // Degrade to a bare tool definition rather than failing the whole tool list.
            return LlmTool.createFunction(
                    mcpTool.getName(),
                    mcpTool.getDescription(),
                    objectMapper.createObjectNode().put("type", "object")
            );
        }
    }
    
    /**
     * Sends the request to the LLM and resolves tool calls, starting at round 0.
     */
    private Mono<LlmResponse> processLlmResponseWithMcp(String sessionId, LlmClient llmClient, 
                                                       LlmRequest originalRequest, List<LlmMessage> conversationHistory) {
        return processLlmResponseWithMcp(sessionId, llmClient, originalRequest, conversationHistory, 0);
    }
    
    /**
     * Sends the request and, if the model requests tool calls, executes them and
     * recurses with the results appended — up to {@link #MAX_TOOL_CALL_ROUNDS} rounds.
     *
     * @param round number of tool-call rounds already completed
     */
    private Mono<LlmResponse> processLlmResponseWithMcp(String sessionId, LlmClient llmClient, 
                                                       LlmRequest request, List<LlmMessage> conversationHistory,
                                                       int round) {
        return llmClient.chatCompletion(request)
                .flatMap(response -> {
                    if (response.getChoices() == null || response.getChoices().isEmpty()) {
                        return Mono.just(response);
                    }
                    
                    LlmMessage assistantMessage = response.getChoices().get(0).getMessage();
                    boolean hasToolCalls = assistantMessage.getToolCalls() != null 
                            && !assistantMessage.getToolCalls().isEmpty();
                    
                    if (!hasToolCalls) {
                        return Mono.just(response);
                    }
                    if (round >= MAX_TOOL_CALL_ROUNDS) {
                        // Give the caller the raw response rather than looping forever.
                        logger.warn("Tool-call round limit ({}) reached for session {}; returning response as-is", 
                                MAX_TOOL_CALL_ROUNDS, sessionId);
                        return Mono.just(response);
                    }
                    
                    return executeToolCallsAndContinue(sessionId, llmClient, request, 
                            conversationHistory, assistantMessage, round);
                });
    }
    
    /**
     * Executes all tool calls of the assistant message in order, appends the assistant
     * message and the tool results to the conversation, and continues the chat.
     */
    private Mono<LlmResponse> executeToolCallsAndContinue(String sessionId, LlmClient llmClient, 
                                                         LlmRequest originalRequest, List<LlmMessage> conversationHistory,
                                                         LlmMessage assistantMessage, int round) {
        
        List<LlmMessage> newMessages = new ArrayList<>(conversationHistory);
        newMessages.add(assistantMessage);
        
        // concatMap preserves ordering: each tool result message must follow the
        // assistant's tool calls in the same order (flatMap would interleave arbitrarily).
        return Flux.fromIterable(assistantMessage.getToolCalls())
                .concatMap(toolCall -> executeToolCall(sessionId, toolCall))
                .collectList()
                .flatMap(toolResults -> {
                    newMessages.addAll(toolResults);
                    
                    LlmRequest continueRequest = new LlmRequest(originalRequest.getModel(), newMessages);
                    continueRequest.setTools(originalRequest.getTools());
                    continueRequest.setToolChoice(originalRequest.getToolChoice());
                    
                    // Recurse so follow-up tool calls in the next response are also executed.
                    return processLlmResponseWithMcp(sessionId, llmClient, continueRequest, 
                            newMessages, round + 1);
                });
    }
    
    /**
     * Executes a single tool call against the MCP server and wraps the result (or an
     * error description) in a tool message, so one failing tool never fails the chat.
     */
    private Mono<LlmMessage> executeToolCall(String sessionId, LlmToolCall toolCall) {
        try {
            String toolName = toolCall.getFunction().getName();
            String argumentsJson = toolCall.getFunction().getArguments();
            // Model-produced arguments arrive as a JSON string; parse before dispatch.
            JsonNode arguments = objectMapper.readTree(argumentsJson);
            
            logger.debug("Executing tool call: toolName={}, arguments={}", toolName, argumentsJson);
            
            // Supplier form defers the MCP call until subscription.
            return Mono.fromFuture(() -> mcpService.callTool(sessionId, toolName, arguments))
                    .map(result -> LlmMessage.toolMessage(result.toString(), toolCall.getId()))
                    .onErrorResume(e -> {
                        logger.error("Tool call failed: toolName={}", toolName, e);
                        return Mono.just(LlmMessage.toolMessage(
                                "Error executing tool: " + toolName, toolCall.getId()));
                    });
                            
        } catch (Exception e) {
            // e.g. malformed arguments JSON from the model.
            logger.error("Error executing tool call: {}", toolCall.getFunction().getName(), e);
            return Mono.just(LlmMessage.toolMessage(
                    "Error executing tool: " + e.getMessage(), toolCall.getId()));
        }
    }
    
    /**
     * Handles tool calls inside streamed responses.
     *
     * <p>Simplified: streamed tool calls are not executed yet; the buffered responses
     * are replayed unchanged. A full streaming tool-call implementation can replace
     * this in a later version.
     */
    private Flux<LlmResponse> processStreamResponseWithMcp(String sessionId, LlmClient llmClient, 
                                                          List<LlmResponse> responses, List<LlmMessage> conversationHistory) {
        return Flux.fromIterable(responses);
    }
    
    /**
     * Creates an LLM client, using the per-request configuration when one is supplied
     * and the factory default otherwise.
     */
    private LlmClient createLlmClient(String clientType, Map<String, String> clientConfig) {
        if (clientConfig != null && !clientConfig.isEmpty()) {
            return llmClientFactory.createClient(clientType, clientConfig);
        } else {
            return llmClientFactory.createClient(clientType);
        }
    }
}