import json
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel


def load_model_and_tokenizer(base_model, lora_adapter):
    """Load the base causal LM, its tokenizer, and a trained LoRA adapter.

    Args:
        base_model: HF hub id or local path of the base model.
        lora_adapter: path to the trained LoRA adapter weights.

    Returns:
        (model, tokenizer) with the adapter applied and the model in eval mode.
    """
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    # The base model ships without a pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token

    base_kwargs = {
        "device_map": "auto",
        "torch_dtype": torch.float16,
        "low_cpu_mem_usage": True,
    }
    base = AutoModelForCausalLM.from_pretrained(base_model, **base_kwargs)

    # Wrap the base model with the LoRA weights and switch to inference mode.
    peft_model = PeftModel.from_pretrained(base, lora_adapter)
    peft_model.eval()
    return peft_model, tokenizer


def generate_solution(model, tokenizer, nums, target, max_length=64):
    """Generate a math expression that combines `nums` to reach `target`.

    Args:
        model: causal LM (with LoRA adapter) used for generation.
        tokenizer: tokenizer matching the model.
        nums: iterable of numbers to use in the expression.
        target: the value the expression should evaluate to.
        max_length: max token length for the (truncated) prompt.

    Returns:
        The generated expression text (the part after the [/INST] marker),
        stripped of surrounding whitespace.
    """
    nums_str = ", ".join(map(str, nums))
    prompt = f"<s>[INST] 用{nums_str}得到{target}的数学表达式：[/INST]"

    # NOTE: no padding here — right-padding a single prompt to max_length
    # would insert pad/EOS tokens before generation and degrade the output.
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=max_length
    ).to(model.device)

    with torch.no_grad():  # disable gradients to save memory
        outputs = model.generate(
            **inputs,
            max_new_tokens=32,  # cap generated length
            temperature=0.7,  # sampling randomness
            top_p=0.9,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id
        )

    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The decoded text echoes the prompt; keep only what follows [/INST].
    # ("[/INST]" is plain text for this tokenizer, so it survives
    # skip_special_tokens and can be used as a split marker.)
    marker = "[/INST]"
    if marker in response:
        response = response.split(marker, 1)[1]
    return response.strip()


def test_on_dataset(model, tokenizer, test_data):
    """Run inference over the test set, printing and collecting predictions.

    Args:
        model: generation model (with LoRA adapter).
        tokenizer: tokenizer matching the model.
        test_data: list of dicts with 'nums', 'target', and 'solution' keys.

    Returns:
        List of result dicts pairing each prediction with its reference.
    """
    predictions = []
    for sample in test_data:
        numbers, goal = sample['nums'], sample['target']
        print(f"num: {numbers}, target: {goal}")

        generated = generate_solution(model, tokenizer, numbers, goal)
        print(f"solution: {generated}\n")

        record = {
            "nums": numbers,
            "target": goal,
            "predicted_solution": generated,
            "original_solution": sample['solution'],
        }
        predictions.append(record)

    return predictions


def main():
    """Entry point: load model + data, run evaluation, save results to JSON."""
    # Paths and model configuration.
    base_model_id = "Qwen/Qwen2.5-0.5B"
    adapter_path = "./qwen-lora-adapter-small"  # trained LoRA adapter location
    dataset_path = "data.json"  # evaluation dataset location

    # Bring up the model and tokenizer.
    print("load...")
    model, tokenizer = load_model_and_tokenizer(base_model_id, adapter_path)

    # Read the test split from the dataset file.
    with open(dataset_path, 'r', encoding='utf-8') as fh:
        test_split = json.load(fh)['test']

    # Run inference over the whole split.
    print("start...")
    outcomes = test_on_dataset(model, tokenizer, test_split)

    # Persist predictions alongside references.
    with open("test_results.json", 'w', encoding='utf-8') as fh:
        json.dump(outcomes, fh, ensure_ascii=False, indent=2)
    print("finish")


# Run the evaluation pipeline only when executed as a script.
if __name__ == "__main__":
    main()
