# server_eval_runner.py (v2 - with parameter check)
import os
import json
import importlib.util
from tqdm import tqdm
from transformers import AutoConfig
from accelerate import init_empty_weights

# 评测环境内的固定路径
USER_CODE_PATH = "/user_code/predict.py"
USER_MODEL_PATH = "/user_model"  # 选手的模型目录
DATASET_PATH = "/eval_dataset/dataset.json"
IMAGE_ROOT_PATH = "/eval_dataset/images"
OUTPUT_PATH = "/output/predictions.json"
PARAMETER_LIMIT = 10_000_000_000  # 10B 参数量限制


def check_model_parameters(model_path: str) -> tuple[int, bool]:
    """Count a model's total parameters without fully loading it into memory.

    Uses ``accelerate.init_empty_weights`` so only the model *structure* is
    instantiated (meta tensors) — cheap even for very large checkpoints.

    Args:
        model_path: Directory containing the model's ``config.json``.

    Returns:
        ``(total_params, is_compliant)`` where ``total_params`` is the
        parameter count (``-1`` if it could not be determined) and
        ``is_compliant`` is True iff the count is within ``PARAMETER_LIMIT``.
        Any failure is treated as non-compliant (fail closed).
    """
    print(f"--- [参数检查] 开始检查模型参数量，路径: {model_path} ---", flush=True)
    try:
        # Load only the model's configuration file — never the weights.
        config = AutoConfig.from_pretrained(model_path)

        # Pick the model class from the config instead of hard-coding
        # AutoModelForCausalLM, so multimodal models are also handled.
        # NOTE(review): "is_vision_model" is not a standard transformers
        # config attribute — confirm it is actually set for the vision
        # models expected here.
        from transformers import AutoModelForCausalLM, AutoModelForVision2Seq

        model_class = (AutoModelForVision2Seq
                       if getattr(config, "is_vision_model", False)
                       else AutoModelForCausalLM)

        # Build the architecture with empty (meta) weights; this does not
        # allocate real parameter storage, so memory use stays tiny.
        with init_empty_weights():
            model = model_class.from_config(config)

        total_params = sum(p.numel() for p in model.parameters())
        is_compliant = total_params <= PARAMETER_LIMIT

        print(f"--- [参数检查] 模型总参数量: {total_params / 1e9:.2f}B ---", flush=True)
        if not is_compliant:
            print(f"--- [参数检查] 警告: 模型参数量 {total_params} 超过 10B 限制! ---", flush=True)

        return total_params, is_compliant
    except Exception as e:
        print(f"--- [参数检查] 无法计算模型参数量: {e} ---", flush=True)
        # Fail closed: if the check itself fails, treat the model as
        # non-compliant rather than letting it through unverified.
        return -1, False


def main():
    """Run the full evaluation pipeline inside the scoring container.

    Steps: enforce the parameter budget, dynamically load the contestant's
    predict.py, initialize their model, run prediction over every dataset
    item, and write all predictions — or a fatal ``{"error": ...}`` object —
    to OUTPUT_PATH so the grader always finds a result file.
    """
    print("--- [评测容器] 开始执行评测 ---", flush=True)

    # 1. Before anything else, verify the model's parameter count.
    total_params, is_compliant = check_model_parameters(USER_MODEL_PATH)
    if not is_compliant:
        # total_params == -1 signals the count could not be determined.
        error_message = (f"Model parameter count ({total_params}) exceeds the 10B limit."
                         if total_params > 0 else "Could not determine model parameter count.")
        with open(OUTPUT_PATH, "w") as f:
            json.dump({"error": error_message}, f)
        print(f"[评测容器] CRITICAL: {error_message}", flush=True)
        return

    # 2. Dynamically load the contestant's predict.py (parameters compliant).
    try:
        spec = importlib.util.spec_from_file_location("predict", USER_CODE_PATH)
        user_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(user_module)
        print("[评测容器] 成功加载选手 predict.py", flush=True)
    except Exception as e:
        print(f"[评测容器] CRITICAL: 加载选手代码失败: {e}", flush=True)
        with open(OUTPUT_PATH, "w") as f:
            json.dump({"error": f"Failed to load predict.py: {e}"}, f)
        return

    # 3. Call the contestant's init_model().
    try:
        model = user_module.init_model()
        print("[评测容器] 成功初始化模型", flush=True)
    except Exception as e:
        print(f"[评测容器] CRITICAL: 模型初始化失败: {e}", flush=True)
        with open(OUTPUT_PATH, "w") as f:
            json.dump({"error": f"Failed to init_model: {e}"}, f)
        return

    # 4. Load the dataset. Guarded like every other stage so a missing or
    # corrupt dataset.json still produces an error object instead of a crash.
    try:
        with open(DATASET_PATH, 'r', encoding='utf-8') as f:
            dataset = json.load(f)
    except Exception as e:
        print(f"[评测容器] CRITICAL: 加载数据集失败: {e}", flush=True)
        with open(OUTPUT_PATH, "w") as f:
            json.dump({"error": f"Failed to load dataset: {e}"}, f)
        return

    # 5. Predict item by item; a per-item failure is recorded, not fatal.
    predictions = []
    for item in tqdm(dataset, desc="[评测容器] 预测进度"):
        try:
            result = user_module.predict(model, item, IMAGE_ROOT_PATH)
            predictions.append(result)
        except Exception as e:
            print(f"\n[评测容器] 题目 {item.get('id')} 预测出错: {e}", flush=True)
            predictions.append({
                "id": item.get('id'),
                "answer": "",
                "model_output": f"PREDICTION_ERROR: {e}"
            })

    # 6. Persist all prediction results.
    with open(OUTPUT_PATH, "w", encoding='utf-8') as f:
        json.dump(predictions, f, ensure_ascii=False, indent=4)

    print(f"--- [评测容器] 预测完成，结果已保存至 {OUTPUT_PATH} ---", flush=True)


if __name__ == "__main__":
    # MOCK MODE: the real pipeline (main()) is intentionally disabled so the
    # container can be smoke-tested without a model or dataset present.
    # Re-enable main() and remove this stub for real evaluation runs.
    print("--- [评测容器] MOCK MODE: 跳过模型推理 ---", flush=True)
    mock_predictions = [{"id": "q1", "answer": "A"}, {"id": "q2", "answer": "B"}]
    # Use the shared OUTPUT_PATH constant instead of a duplicated literal so
    # mock and real runs cannot silently write to different locations.
    with open(OUTPUT_PATH, "w") as f:
        json.dump(mock_predictions, f)
    print("--- [评测容器] MOCK MODE: 已生成模拟结果 ---", flush=True)
    # main()