package net.cyue.ort.llm;

import net.cyue.ort.ModelSessionManager;
import net.cyue.ort.llm.config.ModelConfig;
import net.cyue.ort.llm.generator.GenerationConfig;
import net.cyue.ort.llm.template.ModelChatTemplate;
import net.cyue.ort.llm.tokenizer.Tokenizer;
import net.cyue.ort.llm.cache.CacheManager;
import net.cyue.ort.llm.cache.OnnxCacheManager;
import net.cyue.ort.llm.generator.TextGenerator;
import net.cyue.ort.llm.generator.AdaptiveTextGenerator;
import net.cyue.ort.llm.sampling.GreedySamplingStrategy;
import net.cyue.ort.llm.sampling.RandomSamplingStrategy;
import net.cyue.ort.llm.sampling.SamplingStrategy;
import net.cyue.ort.llm.util.DefaultTokenManager;
import net.cyue.ort.llm.util.TokenManager;
import net.cyue.ort.llm.util.DefaultPromptEngine;
import net.cyue.ort.llm.util.PromptEngine;
import net.cyue.ort.llm.util.PromptPreprocessor;

import java.nio.ByteBuffer;
import java.util.List;
import java.util.Objects;

/**
 * Factory for building LLM clients.
 *
 * <p>Wires together the tokenizer, chat template, ONNX session manager,
 * cache manager, model adapter, sampling strategy, and text generator,
 * and returns a fully assembled {@link LLMClient}. Implements simple
 * constructor-based dependency injection.
 *
 * <p>This class is a non-instantiable static utility.
 */
public final class LLMFactory {

    /** Non-instantiable utility class. */
    private LLMFactory() {
        throw new AssertionError("No LLMFactory instances");
    }

    /**
     * Creates and wires up an LLM client.
     *
     * @param tokenizer    tokenizer used for encoding/decoding tokens
     * @param chatTemplate chat template used to format prompts
     * @param modelBuffers ONNX model bytes, one buffer per model part
     * @param modelConfig  model configuration; {@code eosTokenID} (if non-null)
     *                     is passed to the token manager as the EOS token
     * @return an assembled {@link LLMClient} whose close hook releases the
     *         model adapter and the cache manager
     * @throws NullPointerException if any argument is {@code null}
     */
    public static LLMClient createLLM(
        Tokenizer tokenizer,
        ModelChatTemplate chatTemplate,
        List<ByteBuffer> modelBuffers,
        ModelConfig modelConfig
    ) {
        // Fail fast with clear messages instead of NPE-ing deep in the wiring.
        Objects.requireNonNull(tokenizer, "tokenizer");
        Objects.requireNonNull(chatTemplate, "chatTemplate");
        Objects.requireNonNull(modelBuffers, "modelBuffers");
        Objects.requireNonNull(modelConfig, "modelConfig");

        // 1. Model session manager owning the ONNX runtime sessions.
        ModelSessionManager sessionManager = new ModelSessionManager(modelBuffers);

        // 2. Adapters and base services.
        CacheManager cacheManager = new OnnxCacheManager(sessionManager);

        // Token manager with the EOS token ID, if one is configured.
        long[] eosTokenIDs = null;
        if (modelConfig.eosTokenID != null) {
            eosTokenIDs = new long[]{modelConfig.eosTokenID};
        }
        TokenManager tokenManager = new DefaultTokenManager(tokenizer, eosTokenIDs);

        PromptPreprocessor preprocessor = new PromptPreprocessor(chatTemplate);
        PromptEngine promptEngine = new DefaultPromptEngine(preprocessor);

        // 3. Model adapter bridging the session manager and the generator.
        ModelAdapter modelAdapter = new OnnxModelAdapter(
            sessionManager,
            modelConfig,
            cacheManager
        );

        // 4. Sampling strategy used for candidate-token selection.
        // NOTE(review): hard-coded to random sampling; createSamplingStrategy()
        // below chooses greedy vs. random from a GenerationConfig but is not
        // used here because no GenerationConfig is available at this point —
        // consider threading one through if greedy decoding is desired.
        SamplingStrategy samplingStrategy = new RandomSamplingStrategy();

        // 5. Adaptive text generator (picks sampling or beam search per config).
        TextGenerator textGenerator = new AdaptiveTextGenerator(
            modelAdapter,
            tokenManager,
            cacheManager,
            samplingStrategy
        );

        // 6. Assemble the high-level API; the close hook releases the adapter
        // first, then the cache manager that backs it.
        return new DefaultLLMClient(
            textGenerator,
            promptEngine,
            tokenManager,
            () -> {
                modelAdapter.close();
                cacheManager.close();
            }
        );
    }

    /**
     * Chooses a sampling strategy from a generation config.
     *
     * <p>Greedy sampling is used when the temperature is exactly zero or
     * sampling is disabled; otherwise random sampling is used.
     *
     * @param genConfig generation configuration to inspect
     * @return a {@link GreedySamplingStrategy} or {@link RandomSamplingStrategy}
     * @throws NullPointerException if {@code genConfig} is {@code null}
     */
    public static SamplingStrategy createSamplingStrategy(GenerationConfig genConfig) {
        Objects.requireNonNull(genConfig, "genConfig");
        if (genConfig.getTemperature() == 0 || !genConfig.isDoSample()) {
            return new GreedySamplingStrategy();
        }
        return new RandomSamplingStrategy();
    }
}

