package com.videoweb.infrastructure.ai;

import com.videoweb.domain.agent.valueobject.ModelConfig;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.util.List;
import java.util.concurrent.CompletableFuture;

/**
 * SiliconFlow API client.
 *
 * <p>Wraps calls to the SiliconFlow AI service through langchain4j's
 * OpenAI-compatible chat models, offering synchronous, system-prompted,
 * multi-turn and streaming chat variants, plus a health check and a rough
 * token estimator.
 *
 * <p>Thread-safety: model instances are built per request (each call may carry
 * a different {@link ModelConfig}), so this singleton bean holds no mutable
 * shared state.
 */
@Slf4j
@Service
public class SiliconFlowApiClient {

    /** Fallback sampling temperature when no {@link ModelConfig} is supplied. */
    private static final double DEFAULT_TEMPERATURE = 0.7;

    /** Fallback completion-token limit when no {@link ModelConfig} is supplied. */
    private static final int DEFAULT_MAX_TOKENS = 2000;

    /** Fallback nucleus-sampling (top-p) value when no {@link ModelConfig} is supplied. */
    private static final double DEFAULT_TOP_P = 1.0;

    /** Maximum number of message characters echoed into log lines. */
    private static final int LOG_PREVIEW_LENGTH = 100;

    @Value("${siliconflow.api.base-url}")
    private String baseUrl;

    @Value("${siliconflow.api.api-key}")
    private String apiKey;

    @Value("${siliconflow.api.default-model}")
    private String defaultModel;

    /**
     * Builds a synchronous chat model for the given configuration.
     *
     * <p>A fresh instance is created per call. The previous implementation cached
     * the model in an instance field, but its staleness check always returned
     * {@code true}, so the cache never hit while the unsynchronized field writes
     * still raced between concurrent callers of this singleton bean. Building a
     * local instance keeps the effective behavior and removes the race.
     *
     * @param config model parameters; {@code null} falls back to the configured defaults
     * @return a ready-to-use {@link ChatModel}
     */
    private ChatModel getChatModel(ModelConfig config) {
        return OpenAiChatModel.builder()
                .apiKey(apiKey)
                .baseUrl(baseUrl)
                .modelName(config != null ? config.getModelName() : defaultModel)
                .temperature(config != null ? config.getTemperatureAsDouble() : DEFAULT_TEMPERATURE)
                .maxTokens(config != null ? config.getMaxTokens() : DEFAULT_MAX_TOKENS)
                .topP(config != null ? config.getTopPAsDouble() : DEFAULT_TOP_P)
                .build();
    }

    /**
     * Builds a streaming chat model for the given configuration.
     *
     * <p>Constructed per call for the same reason as {@link #getChatModel(ModelConfig)}.
     *
     * @param config model parameters; {@code null} falls back to the configured defaults
     * @return a ready-to-use {@link StreamingChatModel}
     */
    private StreamingChatModel getStreamingChatModel(ModelConfig config) {
        return OpenAiStreamingChatModel.builder()
                .apiKey(apiKey)
                .baseUrl(baseUrl)
                .modelName(config != null ? config.getModelName() : defaultModel)
                .temperature(config != null ? config.getTemperatureAsDouble() : DEFAULT_TEMPERATURE)
                .maxTokens(config != null ? config.getMaxTokens() : DEFAULT_MAX_TOKENS)
                .topP(config != null ? config.getTopPAsDouble() : DEFAULT_TOP_P)
                .build();
    }

    /** Returns at most {@value #LOG_PREVIEW_LENGTH} characters of {@code message} for log output. */
    private static String logPreview(String message) {
        return message.substring(0, Math.min(message.length(), LOG_PREVIEW_LENGTH));
    }

    /**
     * Synchronous single-message chat.
     *
     * @param message user message to send
     * @param config  model parameters; may be {@code null} for defaults
     * @return the model's full text response
     * @throws RuntimeException if the underlying AI call fails (cause preserved)
     */
    public String chat(String message, ModelConfig config) {
        try {
            log.info("发送同步聊天请求: {}", logPreview(message));

            ChatModel model = getChatModel(config);
            String response = model.chat(message);

            log.info("收到聊天响应，长度: {}", response.length());
            return response;

        } catch (Exception e) {
            log.error("同步聊天请求失败", e);
            throw new RuntimeException("AI服务调用失败: " + e.getMessage(), e);
        }
    }

    /**
     * Synchronous chat with a system prompt prepended to the user message.
     *
     * @param systemPrompt system instruction placed before the user turn
     * @param userMessage  user message to send
     * @param config       model parameters; may be {@code null} for defaults
     * @return the model's full text response
     * @throws RuntimeException if the underlying AI call fails (cause preserved)
     */
    public String chatWithSystemPrompt(String systemPrompt, String userMessage, ModelConfig config) {
        try {
            log.info("发送带系统提示词的聊天请求");

            ChatModel model = getChatModel(config);

            // System prompt first, then the user turn.
            List<ChatMessage> messages = List.of(
                dev.langchain4j.data.message.SystemMessage.from(systemPrompt),
                UserMessage.from(userMessage)
            );

            ChatResponse response = model.chat(messages);
            String result = response.aiMessage().text();

            log.info("收到聊天响应，长度: {}", result.length());
            return result;

        } catch (Exception e) {
            log.error("带系统提示词的聊天请求失败", e);
            throw new RuntimeException("AI服务调用失败: " + e.getMessage(), e);
        }
    }

    /**
     * Synchronous multi-turn chat over an existing message history.
     *
     * @param messageHistory ordered conversation so far
     * @param config         model parameters; may be {@code null} for defaults
     * @return the model's full text response
     * @throws RuntimeException if the underlying AI call fails (cause preserved)
     */
    public String chatWithHistory(List<ChatMessage> messageHistory, ModelConfig config) {
        try {
            log.info("发送多轮对话请求，消息数量: {}", messageHistory.size());

            ChatModel model = getChatModel(config);
            ChatResponse response = model.chat(messageHistory);
            String result = response.aiMessage().text();

            log.info("收到多轮对话响应，长度: {}", result.length());
            return result;

        } catch (Exception e) {
            log.error("多轮对话请求失败", e);
            throw new RuntimeException("AI服务调用失败: " + e.getMessage(), e);
        }
    }

    /**
     * Wraps a caller-supplied streaming handler so the full response length is
     * accumulated for logging and {@code future} is completed on terminal events.
     * Extracted to remove the two previously duplicated anonymous classes in the
     * streaming entry points; only the log strings differed.
     *
     * @param handler        caller-supplied handler; every event is forwarded to it
     * @param future         completed (or failed) when the stream terminates
     * @param completeLogMsg SLF4J template logged on successful completion
     * @param errorLogMsg    message logged on failure
     * @return the wrapping handler to pass to the streaming model
     */
    private StreamingChatResponseHandler wrapHandler(StreamingChatResponseHandler handler,
                                                     CompletableFuture<ChatResponse> future,
                                                     String completeLogMsg,
                                                     String errorLogMsg) {
        return new StreamingChatResponseHandler() {
            private final StringBuilder fullResponse = new StringBuilder();

            @Override
            public void onPartialResponse(String partialResponse) {
                fullResponse.append(partialResponse);
                handler.onPartialResponse(partialResponse);
            }

            @Override
            public void onCompleteResponse(ChatResponse completeResponse) {
                log.info(completeLogMsg, fullResponse.length());
                handler.onCompleteResponse(completeResponse);
                future.complete(completeResponse);
            }

            @Override
            public void onError(Throwable error) {
                log.error(errorLogMsg, error);
                handler.onError(error);
                future.completeExceptionally(error);
            }
        };
    }

    /**
     * Streaming single-message chat.
     *
     * @param message user message to send
     * @param config  model parameters; may be {@code null} for defaults
     * @param handler receives partial/complete/error events as they arrive
     * @return a future resolving to the full response text, or failing with the stream error
     */
    public CompletableFuture<String> streamChat(String message, ModelConfig config, StreamingChatResponseHandler handler) {
        try {
            log.info("发送流式聊天请求: {}", logPreview(message));

            StreamingChatModel model = getStreamingChatModel(config);
            CompletableFuture<ChatResponse> future = new CompletableFuture<>();

            model.chat(message, wrapHandler(handler, future, "流式聊天完成，总长度: {}", "流式聊天失败"));

            return future.thenApply(response -> response.aiMessage().text());

        } catch (Exception e) {
            log.error("流式聊天请求失败", e);
            CompletableFuture<String> errorFuture = new CompletableFuture<>();
            errorFuture.completeExceptionally(new RuntimeException("AI服务调用失败: " + e.getMessage(), e));
            return errorFuture;
        }
    }

    /**
     * Streaming multi-turn chat over an existing message history.
     *
     * @param messageHistory ordered conversation so far
     * @param config         model parameters; may be {@code null} for defaults
     * @param handler        receives partial/complete/error events as they arrive
     * @return a future resolving to the full response text, or failing with the stream error
     */
    public CompletableFuture<String> streamChatWithHistory(List<ChatMessage> messageHistory, ModelConfig config, StreamingChatResponseHandler handler) {
        try {
            log.info("发送流式多轮对话请求，消息数量: {}", messageHistory.size());

            StreamingChatModel model = getStreamingChatModel(config);
            CompletableFuture<ChatResponse> future = new CompletableFuture<>();

            model.chat(messageHistory, wrapHandler(handler, future, "流式多轮对话完成，总长度: {}", "流式多轮对话失败"));

            return future.thenApply(response -> response.aiMessage().text());

        } catch (Exception e) {
            log.error("流式多轮对话请求失败", e);
            CompletableFuture<String> errorFuture = new CompletableFuture<>();
            errorFuture.completeExceptionally(new RuntimeException("AI服务调用失败: " + e.getMessage(), e));
            return errorFuture;
        }
    }

    /**
     * Health check: sends a trivial prompt using the default model configuration.
     *
     * @return {@code true} if the service answered with a non-blank response
     */
    public boolean isHealthy() {
        try {
            ChatModel model = getChatModel(ModelConfig.createDefault());
            String response = model.chat("你好");
            return response != null && !response.trim().isEmpty();
        } catch (Exception e) {
            log.warn("AI服务健康检查失败", e);
            return false;
        }
    }

    /**
     * Rough token estimate: CJK Unified Ideographs (U+4E00–U+9FFF) count as
     * 2 tokens each, every other char as 0.75. Heuristic only — this is not the
     * provider's actual tokenizer, so treat the result as an approximation.
     *
     * @param message text to estimate; {@code null} or empty yields 0
     * @return estimated token count, truncated toward zero
     */
    public int estimateTokenCount(String message) {
        if (message == null || message.isEmpty()) {
            return 0;
        }

        int cjkCount = 0;
        int otherCount = 0;

        for (char c : message.toCharArray()) {
            if (c >= 0x4e00 && c <= 0x9fff) {
                cjkCount++;
            } else {
                otherCount++;
            }
        }

        return (int) (cjkCount * 2 + otherCount * 0.75);
    }
}