package com.vincent.llm;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.File;

/**
 * Estimates the largest {@code max_model_len} a vLLM deployment can serve, given GPU
 * memory, model size, and the model's {@code config.json}.
 *
 * <p>Memory model: usable VRAM = vram × gpu_memory_utilization; the model weights take
 * paramCount × bytesPerParam; whatever remains is divided by the per-token KV-cache
 * footprint (2 × layers × hidden_size × bytesPerParam — the factor 2 covers K and V;
 * hidden_size already equals num_heads × head_dim, so heads must NOT be multiplied in
 * again). The result is capped at {@code max_position_embeddings} and, when below the
 * cap, scaled by a correction factor to compensate for PagedAttention fragmentation.
 */
public class MaxModelLenCalculator {
    /** Per-GPU hardware configuration. */
    static class GPUConfig {
        String modelType;     // e.g. "RTX 3090"
        int vramMB;           // total VRAM in MB
        float memoryUtil;     // gpu_memory_utilization (fraction of VRAM vLLM may use)
        String dtype;         // weight/KV precision, e.g. "float16"
    }

    /** GPU cluster configuration for multi-GPU serving. */
    static class ClusterConfig {
        int gpuCount;            // number of GPUs
        String parallelType;     // parallel strategy: "tensor" or "pipeline"
        int tensorParallelSize;  // tensor-parallel degree (e.g. 4)
    }

    /** Model hyper-parameters parsed from a HuggingFace-style config.json. */
    private static class ModelConfig {
        final int numLayers;
        final int hiddenSize;
        final int numHeads;
        final int maxPositionEmbeddings;

        ModelConfig(File configFile) throws Exception {
            JsonNode config = new ObjectMapper().readTree(configFile);
            this.numLayers = config.get("num_hidden_layers").asInt();
            this.hiddenSize = config.get("hidden_size").asInt();
            this.numHeads = config.get("num_attention_heads").asInt();
            this.maxPositionEmbeddings = config.get("max_position_embeddings").asInt();
        }
    }

    /**
     * Single-GPU entry point.
     *
     * @param gpu        GPU memory configuration
     * @param configFile the model's config.json
     * @param paramCount total parameter count of the model
     * @return the estimated max_model_len in tokens
     * @throws Exception if config.json cannot be read, or the model does not fit
     *                   ({@code RuntimeException})
     */
    public static long calculate(GPUConfig gpu, File configFile, long paramCount) throws Exception {
        ModelConfig model = new ModelConfig(configFile);
        int bytesPerParam = getBytesPerDtype(gpu.dtype);

        // Memory taken by the model weights themselves.
        long modelMem = paramCount * bytesPerParam;

        // KV cache per token = 2 (K and V) * layers * hidden_size * bytes.
        // NOTE: hidden_size = num_heads * head_dim, so numHeads must not appear here —
        // multiplying it in would over-count the cache by a factor of numHeads.
        long kvPerToken = 2L * model.numLayers * model.hiddenSize * bytesPerParam;

        // VRAM vLLM is allowed to use on this card.
        long availableMem = (long) (gpu.vramMB * 1024L * 1024L * gpu.memoryUtil);

        if (modelMem > availableMem) throw new RuntimeException("显存不足");
        long maxLen = (availableMem - modelMem) / kvPerToken;
        // 0.95: PagedAttention memory-fragmentation compensation.
        return capAndCorrect(maxLen, model.maxPositionEmbeddings, 0.95);
    }

    /**
     * Multi-GPU entry point. Tensor parallelism shards both the weights and the KV cache
     * across {@code tensorParallelSize} cards; pipeline parallelism keeps full weights
     * per stage (no parameter sharding is modeled here).
     *
     * @param cluster    cluster topology and parallel strategy
     * @param gpu        per-GPU memory configuration (cluster assumed homogeneous)
     * @param configFile the model's config.json
     * @param paramCount total parameter count of the model
     * @return the estimated max_model_len in tokens
     * @throws Exception if config.json cannot be read, or a single card cannot hold its
     *                   weight shard ({@code RuntimeException})
     */
    public static long calculateMultiGPU(ClusterConfig cluster, GPUConfig gpu, File configFile, long paramCount) throws Exception {
        ModelConfig model = new ModelConfig(configFile);
        int bytesPerParam = getBytesPerDtype(gpu.dtype);

        long modelMemPerGPU;
        int hiddenPerGPU = model.hiddenSize;
        if ("tensor".equals(cluster.parallelType)) {
            if (cluster.tensorParallelSize <= 0) {
                throw new IllegalArgumentException("tensorParallelSize must be >= 1: " + cluster.tensorParallelSize);
            }
            // Tensor parallelism shards weights and the hidden dimension across cards.
            // Only ONE of hiddenSize/numHeads may be divided — dividing both would
            // shrink the KV term by tp^2 instead of tp.
            modelMemPerGPU = (paramCount / cluster.tensorParallelSize) * bytesPerParam;
            hiddenPerGPU = model.hiddenSize / cluster.tensorParallelSize;
        } else {
            modelMemPerGPU = paramCount * bytesPerParam;  // pipeline parallel: no weight sharding
        }

        // Per-card KV cache per token (see calculate() for the formula).
        long kvPerTokenPerGPU = 2L * model.numLayers * hiddenPerGPU * bytesPerParam;

        // The sequence length is bounded by the most constrained (here: any) single card,
        // so compute per-GPU rather than multiplying and dividing by gpuCount.
        long availablePerGPU = (long) (gpu.vramMB * 1024L * 1024L * gpu.memoryUtil);
        if (modelMemPerGPU > availablePerGPU) {
            throw new RuntimeException("单卡显存不足");
        }

        // long arithmetic throughout — the old (int) cast could truncate large results.
        long maxLen = (availablePerGPU - modelMemPerGPU) / kvPerTokenPerGPU;
        // 0.9: PagedAttention memory-fragmentation compensation.
        return capAndCorrect(maxLen, model.maxPositionEmbeddings, 0.9);
    }

    /**
     * Caps the computed length at the model's position-embedding limit; when below the
     * cap, applies the fragmentation correction factor instead.
     */
    private static long capAndCorrect(long maxLen, int maxPositionEmbeddings, double correctionFactor) {
        if (maxLen > maxPositionEmbeddings) {
            return maxPositionEmbeddings;
        }
        return (long) (maxLen * correctionFactor);
    }

    /**
     * Maps a dtype name to its width in bytes.
     *
     * @throws IllegalArgumentException for unsupported dtypes
     */
    private static int getBytesPerDtype(String dtype) {
        switch (dtype.toLowerCase()) {
            case "float32":
                return 4;
            case "bfloat16":
            case "float16":
            case "half":
                return 2;
            case "int8":
                return 1;
            default:
                throw new IllegalArgumentException("不支持的精度类型: " + dtype);
        }
    }

    // Example invocation.
    public static void main(String[] args) throws Exception {
        GPUConfig gpu = new GPUConfig();
        gpu.modelType = "A100";
        gpu.vramMB = 40 * 1024; // 40GB -> 40960MB
        gpu.memoryUtil = 0.95f;
        gpu.dtype = "float16";

        File config = new File("D:\\JavaCode\\frame\\tools-demo\\src\\test\\resources\\config2.json");
        long params = 16_000_000_000L; // 16B parameters

        long maxLen = calculate(gpu, config, params);
        System.out.println("max_model_len: " + maxLen);

        ClusterConfig clusterConfig = new ClusterConfig();
        clusterConfig.gpuCount = 2;
        clusterConfig.parallelType = "tensor";
        clusterConfig.tensorParallelSize = 2;

        long maxLen2 = calculateMultiGPU(clusterConfig, gpu, config, params);
        System.out.println("max_model_len: " + maxLen2);
    }
}
