package net.cyue.ort.llm.gui.config;

import net.cyue.ort.llm.LLMClient;
import net.cyue.ort.llm.LLMFactory;
import net.cyue.ort.llm.config.ModelConfig;
import net.cyue.ort.llm.generator.GenerationConfig;
import net.cyue.ort.llm.gui.state.AppState;
import net.cyue.ort.llm.template.ModelChatTemplate;
import net.cyue.ort.llm.tokenizer.RWKVTokenizer;
import net.cyue.ort.llm.tokenizer.TransformersTokenizer;
import net.cyue.ort.llm.tokenizer.TransformersTokenizerConfig;
import net.cyue.util.FileUtil;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Business layer for model loading: builds an {@link LLMClient} on a dedicated
 * background thread and refreshes the global {@link AppState} on success.
 *
 * <p>NOTE(review): all {@link Listener} callbacks are invoked on the loader's
 * worker thread, not the UI event thread — listeners that update Swing/JavaFX
 * widgets must marshal back to the UI thread themselves. Confirm this matches
 * the GUI layer's expectations.
 */
public class ModelLoader implements AutoCloseable {

    /** Receives progress and outcome notifications for a load operation. */
    public interface Listener {

        /**
         * Called with a human-readable status line ("状态：…") at each phase of
         * the load: validation failure, start, success, or failure.
         */
        void onStatusChanged(String status);

        /** Called after the model was loaded and {@link AppState} refreshed. */
        default void onSuccess() {
        }

        /**
         * Called when loading fails with the root cause. The default prints
         * the stack trace so that listeners which do not override this method
         * never swallow the error silently.
         */
        default void onFailure(Exception ex) {
            ex.printStackTrace();
        }
    }

    private final AppState appState;

    // Single daemon worker: load requests are serialized in submission order,
    // and the thread does not keep the JVM alive if close() is never called.
    private final ExecutorService executor =
            Executors.newSingleThreadExecutor(r -> {
                Thread thread = new Thread(r, "model-loader");
                thread.setDaemon(true);
                return thread;
            });

    /**
     * @param appState global state holder refreshed after a successful load;
     *                 must not be {@code null}
     */
    public ModelLoader(AppState appState) {
        this.appState = Objects.requireNonNull(appState, "appState");
    }

    /**
     * Asynchronously loads the model described by {@code selectedFiles}.
     *
     * <p>If the selection is incomplete, a validation status is reported and an
     * already-completed future is returned without touching {@link AppState}.
     * Otherwise an immutable snapshot of the selection is taken on the calling
     * thread (so later UI edits cannot race the load) and the work is queued
     * on the single worker thread.
     *
     * @param selectedFiles user-selected model file paths
     * @param modelType     architecture of the model being loaded
     * @param listener      progress/outcome sink; must not be {@code null}
     * @return a future completing when the load attempt finishes (the future
     *         completes normally even on load failure; failures are reported
     *         through {@link Listener#onFailure(Exception)})
     */
    public CompletableFuture<Void> loadAsync(SelectedModelFiles selectedFiles,
                                             ModelType modelType,
                                             Listener listener) {
        Objects.requireNonNull(selectedFiles, "selectedFiles");
        Objects.requireNonNull(modelType, "modelType");
        Objects.requireNonNull(listener, "listener");

        if (!selectedFiles.isComplete()) {
            listener.onStatusChanged("状态：请选择所有必需文件");
            return CompletableFuture.completedFuture(null);
        }

        SelectedModelFiles snapshot = selectedFiles.snapshot();
        listener.onStatusChanged("状态：正在加载模型...");
        return CompletableFuture.runAsync(() -> loadInternal(snapshot, modelType, listener), executor);
    }

    /**
     * Runs on the worker thread. AppState is only mutated after BOTH the
     * generation config and the LLM client were built successfully, so a
     * failure leaves the previous state intact.
     */
    private void loadInternal(SelectedModelFiles files,
                              ModelType modelType,
                              Listener listener) {
        try {
            GenerationConfig generationConfig = prepareGenerationConfig(files.getGenerationConfig(), modelType);
            LLMClient llmClient = createLlmClient(files, modelType);

            // TODO(review): if a client was loaded previously, it is replaced
            // here without being closed/released — verify whether LLMClient
            // holds native resources that need explicit disposal.
            appState.setGenerationConfig(generationConfig);
            appState.setLlmClient(llmClient);
            appState.resetHistory();

            listener.onStatusChanged("状态：模型加载完成");
            listener.onSuccess();
        } catch (Exception ex) {
            // Boundary catch: surface every failure to the listener instead of
            // letting it die silently inside the executor.
            listener.onStatusChanged("状态：加载失败 - " + ex.getMessage());
            listener.onFailure(ex);
        }
    }

    /**
     * Loads the generation config from disk and applies per-architecture
     * search settings: beam search (width 4) for transformer models, greedy
     * (width 1) for RWKV; beam depth 3 in both cases.
     *
     * @throws IOException if the config file cannot be read or parsed
     */
    private GenerationConfig prepareGenerationConfig(Path generationConfigPath,
                                                     ModelType modelType) throws IOException {
        return GenerationConfig.fromFile(generationConfigPath.toString())
                .setNumBeams(modelType == ModelType.TRANSFORMER ? 4 : 1)
                .setBeamDepth(3);
    }

    /**
     * Builds the LLM client: reads the model weights (chunked, to support
     * files larger than a single buffer) plus the model config, then wires the
     * architecture-specific tokenizer and chat template.
     *
     * @throws IOException if any of the model files cannot be read
     */
    private LLMClient createLlmClient(SelectedModelFiles files, ModelType modelType) throws IOException {
        ModelConfig modelConfig = ModelConfig.fromFile(files.getConfig().toString());
        List<ByteBuffer> modelBuffers = FileUtil.readLargeFile(files.getModel().toString());

        if (modelType == ModelType.TRANSFORMER) {
            TransformersTokenizer tokenizer = new TransformersTokenizer(files.getTokenizer().toString());
            TransformersTokenizerConfig tokenizerConfig = TransformersTokenizerConfig.fromFile(
                    files.getTokenizerConfig().toString());
            ModelChatTemplate chatTemplate = tokenizerConfig.createChatTemplate(true);

            return LLMFactory.createLLM(
                    tokenizer,
                    chatTemplate,
                    modelBuffers,
                    modelConfig
            ).setDebug(true);
        } else {
            RWKVTokenizer tokenizer = new RWKVTokenizer(files.getTokenizer().toString());

            return LLMFactory.createLLM(
                    tokenizer,
                    null, // RWKV models have no chat template
                    modelBuffers,
                    modelConfig
            ).setDebug(true);
        }
    }

    /**
     * Aborts any in-flight load and stops the worker thread. Safe to call
     * multiple times.
     */
    @Override
    public void close() {
        executor.shutdownNow();
    }
}

