package net.cyue.ort.llm.generator;

import net.cyue.ort.llm.data.Token;
import net.cyue.ort.llm.ModelAdapter;
import net.cyue.ort.llm.cache.CacheManager;
import net.cyue.ort.llm.ModelException;
import net.cyue.ort.llm.data.ModelInput;
import net.cyue.ort.llm.data.ModelOutput;
import net.cyue.ort.llm.sampling.SamplingConfig;
import net.cyue.ort.llm.sampling.SamplingStrategy;
import net.cyue.ort.llm.util.TokenManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

/**
 * 采样文本生成器实现
 * 使用采样策略生成文本
 */
public class SamplingTextGenerator implements TextGenerator {
    private static final Logger logger = LoggerFactory.getLogger(SamplingTextGenerator.class);
    
    private final ModelAdapter model;
    private final TokenManager tokenManager;
    private final CacheManager cacheManager;
    private final SamplingStrategy samplingStrategy;
    
    /**
     * Creates a sampling-based generator.
     *
     * @param model            adapter used to run each inference step
     * @param tokenManager     tokenizer / detokenizer
     * @param cacheManager     KV-cache lifecycle manager
     * @param samplingStrategy strategy that selects the next token from logits
     * @throws NullPointerException if any dependency is {@code null} (fail fast
     *                              instead of failing later inside {@link #generate})
     */
    public SamplingTextGenerator(
        ModelAdapter model,
        TokenManager tokenManager,
        CacheManager cacheManager,
        SamplingStrategy samplingStrategy
    ) {
        this.model = Objects.requireNonNull(model, "model");
        this.tokenManager = Objects.requireNonNull(tokenManager, "tokenManager");
        this.cacheManager = Objects.requireNonNull(cacheManager, "cacheManager");
        this.samplingStrategy = Objects.requireNonNull(samplingStrategy, "samplingStrategy");
    }
    
    /**
     * Generates text autoregressively: tokenizes the prompt, then repeatedly runs
     * the model and samples one token at a time until the EOS token is produced
     * or {@code config.getMaxNewTokens()} tokens have been generated.
     *
     * <p>Each decoded token is streamed to {@code callback} (if non-null); the
     * callback is also notified on completion and on error. The KV cache is
     * always released in the {@code finally} block, even on failure.
     *
     * @param prompt   input text to continue
     * @param config   generation parameters (max tokens, temperature, top-k/p, …)
     * @param callback optional streaming callback; may be {@code null}
     * @return the generated text (trimmed), excluding the prompt
     * @throws GenerationException if model inference fails
     */
    @Override
    public String generate(String prompt, GenerationConfig config, GenerationCallback callback) {
        long startTime = System.currentTimeMillis();
        
        if (logger.isDebugEnabled()) {
            logger.debug("╔═══════════════════════════════════════════════════════════════╗");
            logger.debug("║          SamplingTextGenerator - 开始生成                    ║");
            logger.debug("╚═══════════════════════════════════════════════════════════════╝");
            logger.debug("📝 Prompt (长度: {}): {}", prompt.length(), 
                prompt.length() > 100 ? prompt.substring(0, 100) + "..." : prompt);
            logger.debug("⚙️  配置参数:");
            logger.debug("   • maxNewTokens: {}", config.getMaxNewTokens());
            logger.debug("   • temperature: {}", config.getTemperature());
            logger.debug("   • topK: {}", config.getTopK());
            logger.debug("   • topP: {}", config.getTopP());
            logger.debug("   • doSample: {}", config.isDoSample());
            logger.debug("   • repetitionPenalty: {}", config.getRepetitionPenalty());
        }
        
        List<Long> inputTokens = tokenManager.tokenize(prompt);
        Map<String, Object> cache = cacheManager.createEmptyCache(1, 0, model.getConfig());
        List<Long> generatedTokens = new ArrayList<>();
        StringBuilder output = new StringBuilder();
        SimpleCallbackState state = new SimpleCallbackState(callback);
        
        // Loop-invariant values: the sampling parameters are derived solely from
        // the (unchanging) config, and the EOS id comes from the model config,
        // so resolve both once instead of once per generated token.
        SamplingConfig samplingConfig = SamplingConfig.fromGenerationConfig(config);
        Long eosTokenID = model.getConfig().getEosTokenID();
        
        if (logger.isDebugEnabled()) {
            logger.debug("🔢 输入信息:");
            logger.debug("   • Token数量: {}", inputTokens.size());
            if (inputTokens.size() <= 20) {
                logger.debug("   • Token IDs: {}", inputTokens);
            } else {
                logger.debug("   • Token IDs (前10): {}", inputTokens.subList(0, 10));
                logger.debug("   • Token IDs (后10): {}", inputTokens.subList(inputTokens.size() - 10, inputTokens.size()));
            }
            logger.debug("   • 使用缓存: {}", model.getConfig().isUseCache() ? "是" : "否");
        }
        
        try {
            for (int i = 0; i < config.getMaxNewTokens(); i++) {
                long stepStartTime = System.currentTimeMillis();
                boolean isFirstRun = i == 0;
                
                // Mirrors LLM.java: full sequence on the first step (or when the
                // cache is disabled), a single new token afterwards.
                int seqLen = isFirstRun || !model.getConfig().isUseCache()
                        ? inputTokens.size() + generatedTokens.size()
                        : 1;
                
                // Number of tokens already present in the KV cache (LLM.java parity).
                int pastSeqLen = isFirstRun ? 0 : inputTokens.size() + generatedTokens.size() - 1;
                
                // Full context (prompt + generated so far) — needed both for
                // full-sequence inference and for repetition-aware sampling.
                List<Long> inputIDList = new ArrayList<>(inputTokens);
                inputIDList.addAll(generatedTokens);
                
                if (logger.isDebugEnabled()) {
                    logger.debug("");
                    logger.debug("┌─────────────────────────────────────────────────────────┐");
                    logger.debug("│  🔄 Step #{}                                            │", i + 1);
                    logger.debug("└─────────────────────────────────────────────────────────┘");
                    logger.debug("   📊 状态: isFirstRun={}, seqLen={}, pastSeqLen={}, 输入序列长度={}", 
                        isFirstRun, seqLen, pastSeqLen, inputIDList.size());
                }
                
                // With the cache active (seqLen == 1) only the most recent token
                // is fed to the model; otherwise the whole context is.
                ModelInput modelInput = new ModelInput(
                    seqLen == 1 ? Collections.singletonList(
                        generatedTokens.isEmpty() ? inputTokens.get(inputTokens.size() - 1) : generatedTokens.get(generatedTokens.size() - 1)
                    ) : inputIDList,
                    cache,
                    isFirstRun,
                    pastSeqLen
                );
                
                long inferenceStartTime = System.currentTimeMillis();
                ModelOutput result = model.runInference(modelInput);
                long inferenceDuration = System.currentTimeMillis() - inferenceStartTime;
                cache = cacheManager.extractFromResult(result);
                
                // Only the last position's logits matter for next-token selection.
                float[] logits = result.getLastTokenLogits();
                
                if (logger.isDebugEnabled()) {
                    logger.debug("   🧠 推理结果: 耗时={}ms, Logits长度={}", inferenceDuration, logits.length);
                    if (logits.length > 0) {
                        // Min/max of the logits, purely for diagnostics.
                        float maxLogit = Float.NEGATIVE_INFINITY;
                        float minLogit = Float.POSITIVE_INFINITY;
                        for (float logit : logits) {
                            if (logit > maxLogit) maxLogit = logit;
                            if (logit < minLogit) minLogit = logit;
                        }
                        logger.debug("      • Logits范围: [{}, {}]", 
                            String.format("%.2f", minLogit), String.format("%.2f", maxLogit));
                    }
                }
                
                // Sample over the full context so penalties (e.g. repetition) see
                // everything generated so far — LLM.java parity.
                long samplingStartTime = System.currentTimeMillis();
                Token nextToken = samplingStrategy.selectNextToken(logits, inputIDList, samplingConfig);
                long samplingDuration = System.currentTimeMillis() - samplingStartTime;
                
                generatedTokens.add(nextToken.getId());
                
                // Decode only the newly generated token (LLM.java parity).
                String text = tokenManager.detokenize(Collections.singletonList(nextToken.getId()));
                output.append(text);
                state.appendText(text);
                
                if (logger.isDebugEnabled()) {
                    logger.debug("   🎲 采样结果: 耗时={}ms", samplingDuration);
                    logger.debug("   ✅ 选择结果:");
                    logger.debug("      • Token ID: {}", nextToken.getId());
                    logger.debug("      • 文本: '{}'", text.replace("\n", "\\n").replace("\r", "\\r"));
                    logger.debug("      • 当前生成Token数: {}, 累计输出长度: {} 字符", 
                        generatedTokens.size(), output.length());
                }
                
                // EOS check: only the token just produced can be the first EOS
                // (an earlier one would already have ended the loop), so compare
                // it directly instead of scanning the whole output list.
                if (eosTokenID != null && eosTokenID.equals(nextToken.getId())) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("   🛑 检测到EOS token (ID: {})，提前结束生成", eosTokenID);
                    }
                    break;
                }
                if (generatedTokens.size() >= config.getMaxNewTokens()) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("   ⏹️  达到最大Token数量限制，结束生成");
                    }
                    break;
                }
                
                long stepDuration = System.currentTimeMillis() - stepStartTime;
                if (logger.isDebugEnabled()) {
                    logger.debug("   ⏱️  步骤总耗时: {}ms", stepDuration);
                }
            }
            
            state.finish();
            long totalDuration = System.currentTimeMillis() - startTime;
            if (logger.isDebugEnabled()) {
                logger.debug("");
                logger.debug("╔═══════════════════════════════════════════════════════════════╗");
                logger.debug("║          SamplingTextGenerator - 生成完成                    ║");
                logger.debug("╚═══════════════════════════════════════════════════════════════╝");
                logger.debug("📊 统计信息:");
                logger.debug("   • 总耗时: {}ms ({}s)", totalDuration, String.format("%.2f", totalDuration / 1000.0));
                logger.debug("   • 生成Token数量: {}", generatedTokens.size());
                logger.debug("   • 输出长度: {} 字符", output.length());
                if (generatedTokens.size() > 0) {
                    logger.debug("   • 平均每Token耗时: {}ms", 
                        String.format("%.2f", (double) totalDuration / generatedTokens.size()));
                }
                if (generatedTokens.size() <= 50) {
                    logger.debug("   • 生成的Token IDs: {}", generatedTokens);
                } else {
                    logger.debug("   • 生成的Token IDs (前20): {}", generatedTokens.subList(0, 20));
                    logger.debug("   • 生成的Token IDs (后20): {}", 
                        generatedTokens.subList(generatedTokens.size() - 20, generatedTokens.size()));
                }
                String finalOutput = output.toString().trim();
                if (finalOutput.length() > 200) {
                    logger.debug("📄 最终输出 (前200字符): {}", finalOutput.substring(0, 200) + "...");
                } else {
                    logger.debug("📄 最终输出: {}", finalOutput);
                }
            }
            
            return output.toString().trim();
        } catch (ModelException e) {
            state.error(e);
            logger.error("生成失败", e);
            throw new GenerationException("Generation failed", e);
        } catch (RuntimeException e) {
            // Unexpected runtime failures previously bypassed the callback
            // entirely; notify it, then rethrow unchanged so callers still see
            // the exact same exception type.
            state.error(e);
            logger.error("生成失败", e);
            throw e;
        } finally {
            // Always release the KV cache, even on failure.
            cacheManager.cleanupCache(cache);
        }
    }
    
    /**
     * Minimal callback adapter: accumulates the full generated text and forwards
     * lifecycle events to the (possibly null) {@link GenerationCallback}.
     */
    private static class SimpleCallbackState {
        private final GenerationCallback callback;
        private final StringBuilder fullText = new StringBuilder();
        
        SimpleCallbackState(GenerationCallback callback) {
            this.callback = callback;
        }
        
        /** Appends a decoded chunk and streams it to the callback (no-ops on null/empty). */
        void appendText(String text) {
            if (text != null && !text.isEmpty()) {
                fullText.append(text);
                if (callback != null) {
                    callback.onTokenGenerated(text);
                }
            }
        }
        
        /** Signals successful completion with the accumulated text. */
        void finish() {
            if (callback != null) {
                callback.onComplete(fullText.toString());
            }
        }
        
        /** Forwards a failure to the callback. */
        void error(Exception e) {
            if (callback != null) {
                callback.onError(e);
            }
        }
    }
}

