package com.allm.ai.common.strategy;

import com.allm.ai.common.dto.StreamingOutput;
import lombok.extern.slf4j.Slf4j;
import reactor.core.publisher.FluxSink;

import java.util.Locale;

/**
 * Abstract base class for AI model strategies.
 *
 * <p>Provides shared helpers for splitting model output into small chunks and
 * emitting them through a reactive {@link FluxSink}, plus utilities for
 * separating "thinking" sections from regular content.
 *
 * <p>NOTE(review): all streaming helpers pace emission with a blocking
 * {@link Thread#sleep(long)}. Callers must subscribe on a scheduler that
 * tolerates blocking (e.g. {@code boundedElastic}) — confirm against the
 * actual subscribers.
 */
@Slf4j
public abstract class BaseAiModelStrategy implements AiModelStrategy {

    /**
     * Emits {@code content} through {@code sink} in fixed-size character chunks,
     * pausing between chunks to simulate real streaming output.
     *
     * @param content    text to emit; {@code null} or blank emits nothing
     * @param sink       sink receiving one {@link StreamingOutput} per chunk
     * @param outputType output type (THINKING or CONTENT)
     */
    protected void streamContentInChunks(String content, FluxSink<StreamingOutput> sink, String outputType) {
        if (content == null || content.trim().isEmpty()) {
            return;
        }

        int chunkSize = 2; // 1-2 tokens correspond to roughly 2-4 characters
        int delayMs = 50;  // pause between chunks, in milliseconds

        for (int i = 0; i < content.length(); i += chunkSize) {
            String chunk = content.substring(i, Math.min(i + chunkSize, content.length()));
            // Emit every chunk, including whitespace-only ones: skipping them (as the
            // previous version did) silently dropped whitespace from the output.
            sink.next(new StreamingOutput(outputType, chunk));
            if (sleepInterrupted(delayMs)) {
                break; // interrupted: stop streaming, interrupt status already restored
            }
        }
    }

    /**
     * Smarter chunking — splits on word boundaries so words stay intact.
     *
     * @param content    text to emit; {@code null} or blank emits nothing
     * @param sink       sink receiving one {@link StreamingOutput} per word
     * @param outputType output type (THINKING or CONTENT)
     */
    protected void streamContentInSmartChunks(String content, FluxSink<StreamingOutput> sink, String outputType) {
        if (content == null || content.trim().isEmpty()) {
            return;
        }

        // Zero-width split before/after whitespace and punctuation, so separators
        // are kept as their own tokens rather than discarded.
        String[] words = content.split("(?<=[\\s\\p{Punct}])|(?=[\\s\\p{Punct}])");
        int delayMs = 30; // pause between word chunks, in milliseconds

        StringBuilder pendingWhitespace = new StringBuilder();
        for (String word : words) {
            if (word.trim().isEmpty()) {
                // Carry separators forward so word spacing survives; the previous
                // version dropped these tokens, gluing adjacent words together.
                pendingWhitespace.append(word);
                continue;
            }
            sink.next(new StreamingOutput(outputType, pendingWhitespace + word));
            pendingWhitespace.setLength(0);
            if (sleepInterrupted(delayMs)) {
                break;
            }
        }
    }

    /**
     * Token-estimate-based chunking (recommended). Accumulates tokens until the
     * estimated token count would exceed the per-chunk target, then flushes.
     * Whitespace is appended to the current chunk so spacing is preserved.
     *
     * @param content    text to emit; {@code null} or blank emits nothing
     * @param sink       sink receiving one {@link StreamingOutput} per chunk
     * @param outputType output type (THINKING or CONTENT)
     */
    protected void streamContentByTokenEstimate(String content, FluxSink<StreamingOutput> sink, String outputType) {
        if (content == null || content.trim().isEmpty()) {
            return;
        }

        // Zero-width split on whitespace/punctuation boundaries.
        String[] tokens = content.split("(?<=\\s|\\p{Punct})|(?=\\s|\\p{Punct})");

        StringBuilder currentChunk = new StringBuilder();
        int estimatedTokens = 0;
        int targetTokensPerChunk = 2; // target: 1-2 tokens per emitted chunk
        int delayMs = 50;

        for (String token : tokens) {
            if (token.trim().isEmpty()) {
                // Separators ride along with the chunk being built.
                currentChunk.append(token);
                continue;
            }

            int tokenCount = estimateTokenCount(token);

            // Flush the pending chunk before it would exceed the target size.
            if (estimatedTokens + tokenCount > targetTokensPerChunk && !currentChunk.toString().trim().isEmpty()) {
                sink.next(new StreamingOutput(outputType, currentChunk.toString()));
                if (sleepInterrupted(delayMs)) {
                    break;
                }
                currentChunk.setLength(0);
                estimatedTokens = 0;
            }

            currentChunk.append(token);
            estimatedTokens += tokenCount;
        }

        // Flush whatever is left (also reached after an interrupt).
        String remainingChunk = currentChunk.toString();
        if (!remainingChunk.trim().isEmpty()) {
            sink.next(new StreamingOutput(outputType, remainingChunk));
        }
    }

    /**
     * Sleeps for {@code delayMs} to pace chunk emission.
     *
     * @return {@code true} if the current thread was interrupted (interrupt
     *         status is restored), signalling the caller to stop streaming
     */
    private boolean sleepInterrupted(int delayMs) {
        try {
            Thread.sleep(delayMs);
            return false;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return true;
        }
    }

    /**
     * Rough token-count heuristic: each CJK character, each contiguous run of
     * Latin letters/digits, and each other symbol counts as one token;
     * whitespace and punctuation count as zero.
     *
     * @return 0 for {@code null}/blank input, otherwise at least 1
     */
    private int estimateTokenCount(String text) {
        if (text == null || text.trim().isEmpty()) {
            return 0;
        }

        int tokenCount = 0;
        boolean inEnglishWord = false;

        for (char c : text.toCharArray()) {
            if (isChinese(c)) {
                tokenCount += 1; // one CJK character ≈ one token
                inEnglishWord = false;
            } else if (Character.isLetter(c)) {
                if (!inEnglishWord) {
                    tokenCount += 1; // start of a new word
                    inEnglishWord = true;
                }
                // further letters of the same word don't add tokens
            } else if (Character.isDigit(c)) {
                if (!inEnglishWord) {
                    tokenCount += 1; // a digit run counts as one token
                    inEnglishWord = true;
                }
            } else if (Character.isWhitespace(c) || isPunctuation(c)) {
                inEnglishWord = false;
                // whitespace and punctuation are not counted separately
            } else {
                tokenCount += 1; // any other character counts as one token
                inEnglishWord = false;
            }
        }

        // Non-blank input always yields at least one token.
        return Math.max(1, tokenCount);
    }

    /**
     * Splits {@code text} into thinking and regular content and emits both.
     * Recognizes {@code <thinking>...</thinking>} tags and the inline markers
     * {@code 思考：} / {@code thinking:}; text without markers is emitted as
     * plain CONTENT.
     *
     * @param text        raw model output
     * @param sink        sink receiving the classified output
     * @param useChunking whether to stream each section in token-sized chunks
     */
    protected void extractThinkingContentWithChunking(String text, FluxSink<StreamingOutput> sink, boolean useChunking) {
        String lowerText = text.toLowerCase(Locale.ROOT);

        if (lowerText.contains("<thinking>") || lowerText.contains("</thinking>")) {
            emitSegment(extractContentBetweenTags(text, "<thinking>", "</thinking>"),
                    sink, StreamingOutput.THINKING, useChunking);
            emitSegment(extractContentOutsideTags(text, "<thinking>", "</thinking>"),
                    sink, StreamingOutput.CONTENT, useChunking);
        } else if (lowerText.contains("思考：") || lowerText.contains("thinking:")) {
            // NOTE(review): the thinking section is emitted BEFORE the content that
            // textually precedes the marker — confirm downstream expects this order.
            emitSegment(extractContentAfterMarker(text, "思考：", "thinking:"),
                    sink, StreamingOutput.THINKING, useChunking);
            emitSegment(extractContentBeforeMarker(text, "思考：", "thinking:"),
                    sink, StreamingOutput.CONTENT, useChunking);
        } else {
            // Plain content is passed through untrimmed, unlike the tagged branches.
            if (useChunking) {
                streamContentByTokenEstimate(text, sink, StreamingOutput.CONTENT);
            } else {
                sink.next(new StreamingOutput(StreamingOutput.CONTENT, text));
            }
        }
    }

    /**
     * Trims and emits one extracted segment; blank or {@code null} segments are
     * skipped. Chunked output goes through {@link #streamContentByTokenEstimate}.
     */
    private void emitSegment(String segment, FluxSink<StreamingOutput> sink, String outputType, boolean useChunking) {
        if (segment == null || segment.trim().isEmpty()) {
            return;
        }
        if (useChunking) {
            streamContentByTokenEstimate(segment.trim(), sink, outputType);
        } else {
            sink.next(new StreamingOutput(outputType, segment.trim()));
        }
    }

    /**
     * Returns the text between the first case-insensitive occurrence of
     * {@code startTag} and the next {@code endTag}, or {@code null} if either
     * tag is missing.
     */
    protected String extractContentBetweenTags(String text, String startTag, String endTag) {
        String lowerText = text.toLowerCase(Locale.ROOT);
        int startIndex = lowerText.indexOf(startTag.toLowerCase(Locale.ROOT));
        if (startIndex == -1) {
            return null;
        }
        startIndex += startTag.length();
        int endIndex = lowerText.indexOf(endTag.toLowerCase(Locale.ROOT), startIndex);
        if (endIndex == -1) {
            return null;
        }
        return text.substring(startIndex, endIndex);
    }

    /**
     * Returns {@code text} with every case-insensitive
     * {@code startTag...endTag} region removed. If a start tag is never closed,
     * everything from that tag onward is dropped (matches previous behavior).
     */
    protected String extractContentOutsideTags(String text, String startTag, String endTag) {
        StringBuilder result = new StringBuilder();
        String lowerText = text.toLowerCase(Locale.ROOT);
        String lowerStart = startTag.toLowerCase(Locale.ROOT);
        String lowerEnd = endTag.toLowerCase(Locale.ROOT);
        int startIndex = 0;

        while (startIndex < text.length()) {
            int tagStart = lowerText.indexOf(lowerStart, startIndex);
            if (tagStart == -1) {
                result.append(text, startIndex, text.length());
                break;
            }

            result.append(text, startIndex, tagStart);

            // Search for the closing tag AFTER the opening tag's own characters, so a
            // start tag that happens to contain the end-tag text cannot match itself.
            int tagEnd = lowerText.indexOf(lowerEnd, tagStart + startTag.length());
            if (tagEnd == -1) {
                break; // unterminated tag: drop the remainder
            }

            startIndex = tagEnd + endTag.length();
        }

        return result.toString();
    }

    /**
     * Returns the text after the first matching marker, or {@code null} if none
     * match. Markers are tried in argument order (not by earliest occurrence).
     */
    protected String extractContentAfterMarker(String text, String... markers) {
        String lowerText = text.toLowerCase(Locale.ROOT);
        for (String marker : markers) {
            int index = lowerText.indexOf(marker.toLowerCase(Locale.ROOT));
            if (index != -1) {
                return text.substring(index + marker.length());
            }
        }
        return null;
    }

    /**
     * Returns the text before the earliest occurrence of any marker, or
     * {@code null} if no marker is found.
     */
    protected String extractContentBeforeMarker(String text, String... markers) {
        String lowerText = text.toLowerCase(Locale.ROOT);
        int minIndex = text.length();
        for (String marker : markers) {
            int index = lowerText.indexOf(marker.toLowerCase(Locale.ROOT));
            if (index != -1 && index < minIndex) {
                minIndex = index;
            }
        }
        return minIndex < text.length() ? text.substring(0, minIndex) : null;
    }

    /**
     * Returns whether {@code c} is a CJK ideograph in the Basic Multilingual Plane.
     *
     * <p>Note: a Java {@code char} is a 16-bit UTF-16 code unit (max 0xFFFF), so
     * supplementary ranges such as CJK Extension B (U+20000..U+2A6DF) can never be
     * matched here — the previous comparison against that range was dead code and
     * has been removed (no behavior change). Callers needing supplementary-plane
     * support should work with int code points instead.
     */
    protected boolean isChinese(char c) {
        return (c >= 0x4E00 && c <= 0x9FFF)      // CJK Unified Ideographs
                || (c >= 0x3400 && c <= 0x4DBF); // CJK Extension A
    }

    /**
     * Returns whether {@code c} belongs to any Unicode punctuation category.
     */
    protected boolean isPunctuation(char c) {
        // Evaluate Character.getType once instead of up to seven times.
        switch (Character.getType(c)) {
            case Character.CONNECTOR_PUNCTUATION:
            case Character.DASH_PUNCTUATION:
            case Character.START_PUNCTUATION:
            case Character.END_PUNCTUATION:
            case Character.INITIAL_QUOTE_PUNCTUATION:
            case Character.FINAL_QUOTE_PUNCTUATION:
            case Character.OTHER_PUNCTUATION:
                return true;
            default:
                return false;
        }
    }
}
