package com.example.xunai.service.ai.impl;

import com.example.xunai.dto.request.ai.BatchChatRequest;
import com.example.xunai.dto.request.ai.ChatRequest;
import com.example.xunai.dto.request.ai.StreamChatRequest;
import com.example.xunai.dto.response.ai.BatchChatResponse;
import com.example.xunai.dto.response.ai.ChatResponse;
import com.example.xunai.dto.response.ai.StreamChatResponse;
import com.example.xunai.dto.response.system.ModelConfigResponse;
import com.example.xunai.service.ai.*;
import com.example.xunai.service.system.SystemHealthService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

@Slf4j
@Service
@RequiredArgsConstructor
public class AiChatServiceImpl implements AiChatService {

    // Facade that delegates AI-chat operations to specialized services:
    // streaming, synchronous, batch, model configuration, and system health.
    private final StreamChatService streamChatService;
    private final SyncChatService syncChatService;
    private final BatchChatService batchChatService;
    private final ModelConfigService modelConfigService;
    private final SystemHealthService systemHealthService;

    // ==================== Streaming chat methods ====================

    /**
     * Starts a streaming chat session and returns its SSE emitter.
     *
     * @param request streaming chat request
     * @return the {@link SseEmitter} produced by {@link StreamChatService}
     */
    @Override
    public SseEmitter streamChat(StreamChatRequest request) {
        log.info("AiChatService - 流式聊天，请求: {}", request);
        return streamChatService.streamChat(request);
    }

    /**
     * Starts a streaming chat session and returns a metadata response object
     * describing the started stream (event type {@code "start"}).
     *
     * <p>NOTE(review): the emitter returned by the delegate is intentionally
     * discarded here — the call is made only for its side effect of starting
     * the stream. Confirm the emitter is retrievable elsewhere (e.g. via
     * sessionId) if callers need it.
     *
     * @param request streaming chat request
     * @return a {@link StreamChatResponse} echoing the request identifiers
     */
    @Override
    public StreamChatResponse streamChatWithResponse(StreamChatRequest request) {
        log.info("AiChatService - 流式聊天（返回响应对象），请求: {}", request);
        // Fix: the original bound this to an unused local variable `emitter`.
        streamChatService.streamChat(request);
        return StreamChatResponse.builder()
                .conversationId(request.getConversationId())
                .sessionId(request.getSessionId())
                .modelType(request.getModelType())
                .isStreaming(true)
                .requestId(request.getRequestId())
                .eventType("start")
                .build();
    }

    // ==================== Synchronous chat methods ====================

    /**
     * Performs a synchronous chat call and wraps the raw content in a
     * {@link ChatResponse}.
     *
     * @param request chat request
     * @return response carrying the AI content plus request identifiers
     */
    @Override
    public ChatResponse getAiResponse(ChatRequest request) {
        log.info("AiChatService - 同步聊天，请求: {}", request.toLogString());
        String content = syncChatService.getAiResponse(request);
        return ChatResponse.builder()
                .content(content)
                .conversationId(request.getConversationId())
                .modelType(request.getModelType())
                .build();
    }

    /**
     * "Async" chat entry point; currently backed by the synchronous path
     * (see inline comment), kept for interface compatibility.
     *
     * @param request chat request; must satisfy {@code request.isValid()}
     * @return the AI response content
     * @throws IllegalArgumentException if the request fails validation
     */
    @Override
    public String getAiResponseAsync(ChatRequest request) {
        log.info("AiChatService - 开始异步聊天，请求: {}", request.toLogString());

        if (!request.isValid()) {
            throw new IllegalArgumentException("聊天请求参数无效: " + request);
        }

        // Async chat currently reuses the synchronous implementation; can be
        // split into a dedicated async service later.
        return getAiResponse(request).getContent();
    }

    // ==================== Batch chat methods ====================

    /**
     * Executes a batch of chat requests and aggregates the results.
     *
     * <p>Fixes relative to the original implementation:
     * <ul>
     *   <li>guards against a null/empty request list, which previously made
     *       {@code get(0)} throw;</li>
     *   <li>hoists the first request's model type out of the mapping lambda
     *       so it is read once;</li>
     *   <li>reports actual elapsed milliseconds instead of a raw wall-clock
     *       timestamp ({@code System.currentTimeMillis()} is a point in time,
     *       not a duration).</li>
     * </ul>
     *
     * @param request batch request containing the individual chat requests
     * @return completed batch response with per-item results and counts
     */
    @Override
    public BatchChatResponse batchChat(BatchChatRequest request) {
        log.info("AiChatService - 批量聊天请求，数量: {}", request.getRequestCount());
        long startMillis = System.currentTimeMillis();

        List<ChatRequest> requests = request.getRequests();
        // Guard: an empty batch would previously blow up on get(0) below.
        if (requests == null || requests.isEmpty()) {
            return BatchChatResponse.completed(
                    request.getBatchId(), 0, 0, 0,
                    Collections.<ChatResponse>emptyList(), 0L);
        }

        List<String> responses = batchChatService.batchChat(requests);

        // All items are tagged with the first request's model type — the batch
        // is presumably homogeneous (TODO confirm this invariant with callers).
        String modelType = requests.get(0).getModelType();
        List<ChatResponse> chatResponses = responses.stream()
                .map(content -> ChatResponse.builder()
                        .content(content)
                        .modelType(modelType)
                        .build())
                .collect(Collectors.toList());

        // Success = non-null content that is not flagged with the delegate's
        // failure prefix; failures are inferred by difference.
        int successCount = (int) responses.stream()
                .filter(response -> response != null && !response.startsWith("处理失败"))
                .count();
        int failureCount = request.getRequestCount() - successCount;

        return BatchChatResponse.completed(
                request.getBatchId(),
                request.getRequestCount(),
                successCount,
                failureCount,
                chatResponses,
                System.currentTimeMillis() - startMillis // elapsed time, not a timestamp
        );
    }

    // ==================== Stream control methods ====================

    /** Flags the stream identified by {@code sessionId} to stop. */
    @Override
    public void setStreamStopFlag(String sessionId) {
        log.info("AiChatService - 设置停止标志，sessionId: {}", sessionId);
        streamChatService.setStreamStopFlag(sessionId);
    }

    /** @return whether the stream for {@code sessionId} should stop. */
    @Override
    public boolean shouldStopStream(String sessionId) {
        return streamChatService.shouldStopStream(sessionId);
    }

    /** Clears the stop flag for the stream identified by {@code sessionId}. */
    @Override
    public void clearStreamStopFlag(String sessionId) {
        log.info("AiChatService - 清除停止标志，sessionId: {}", sessionId);
        streamChatService.clearStreamStopFlag(sessionId);
    }

    // ==================== Other delegating methods ====================

    /**
     * Placeholder for async-result retrieval; to be moved into a dedicated
     * async service later.
     *
     * @param requestId id of the async request
     * @return a fixed placeholder message containing the request id
     */
    @Override
    public String getAsyncResult(String requestId) {
        log.info("AiChatService - 获取异步结果，requestId: {}", requestId);
        return "异步结果获取功能待实现 - RequestId: " + requestId;
    }

    /**
     * Assembles the supported-models response from the raw map entries
     * exposed by {@link ModelConfigService}.
     *
     * @return model list plus default model and configuration summary
     */
    @Override
    public ModelConfigResponse getSupportedModels() {
        log.info("AiChatService - 获取支持的模型列表");
        List<Map<String, Object>> models = modelConfigService.getSupportedModels();

        // Map keys ("code", "name", ...) are the contract with
        // ModelConfigService; a missing or mistyped entry surfaces as a
        // ClassCastException / null here.
        List<ModelConfigResponse.ModelInfo> modelInfos = models.stream()
                .map(model -> ModelConfigResponse.ModelInfo.builder()
                        .code((String) model.get("code"))
                        .name((String) model.get("name"))
                        .description((String) model.get("description"))
                        .enabled((Boolean) model.get("enabled"))
                        .maxTokens((Integer) model.get("maxTokens"))
                        .temperature((Double) model.get("temperature"))
                        .streaming((Boolean) model.get("streaming"))
                        .configured(modelConfigService.isModelConfigured((String) model.get("code")))
                        .config(modelConfigService.getModelConfig((String) model.get("code")))
                        .build())
                .collect(Collectors.toList());

        return ModelConfigResponse.of(
                modelConfigService.getDefaultModel(),
                modelInfos.size(),
                modelInfos,
                modelConfigService.getConfigSummary()
        );
    }

    /** @return in-flight streaming messages for the given conversation. */
    @Override
    public List<String> getStreamingMessages(Long conversationId) {
        log.info("AiChatService - 获取流式消息，conversationId: {}", conversationId);
        return streamChatService.getStreamingMessages(conversationId);
    }

    /** @return the configured default model code. */
    @Override
    public String getDefaultModel() {
        return modelConfigService.getDefaultModel();
    }

    /** @return whether the given model type is available. */
    @Override
    public boolean isModelAvailable(String modelType) {
        return modelConfigService.isModelAvailable(modelType);
    }

    /** @return the raw configuration map for the given model type. */
    @Override
    public Map<String, Object> getModelConfig(String modelType) {
        return modelConfigService.getModelConfig(modelType);
    }

    /** @return the system health report from {@link SystemHealthService}. */
    @Override
    public Map<String, Object> healthCheck() {
        return systemHealthService.healthCheck();
    }

    /** @return whether the given model type has configuration present. */
    @Override
    public boolean isModelConfigured(String modelType) {
        return modelConfigService.isModelConfigured(modelType);
    }

    /** @return the list of model types that currently have configuration. */
    @Override
    public List<String> getConfiguredModelTypes() {
        return modelConfigService.getConfiguredModelTypes();
    }

    /** @return an aggregated summary of all model configuration. */
    @Override
    public Map<String, Object> getConfigSummary() {
        return modelConfigService.getConfigSummary();
    }
}