import json
import os
import random
from datasets import Dataset
import torch
from unsloth import FastLanguageModel
from transformers import AutoTokenizer

# Model configuration
max_seq_length = 2048  # hard cap on tokenized sequence length
dtype = None  # passed straight to FastLanguageModel.from_pretrained below
load_in_4bit = False  # no 4-bit quantization when loading weights
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # prefer first GPU

# Training prompt template. Placeholders (in order): subject keywords,
# question, chain-of-thought, final answer. The Chinese text is part of the
# runtime prompt and must not be altered.
train_prompt_style = """
    ### 提示:
    你是一个对于解答高考题目有丰富经验的专家，现在有人问你关于{}。
    请回答下面的问题，在回答问题之前请给出逐步的推理过程。

    ### 问题：
    {}

    ### 回答：
    <think>
    {}
    </think>
    <answer>
    {}
    </answer>
    """

# Batch formatter used by Dataset.map to build the "text" training column
def formatting_prompts_func(data):
    """Fill the training prompt template for a batch of examples.

    Relies on the module-level ``tokenizer`` and ``train_prompt_style``.

    Args:
        data: batched columns with "keywords", "Question", "Complex_CoT"
            and "Response" lists of equal length.

    Returns:
        dict with a single "text" key: one fully formatted prompt
        (terminated by the tokenizer's EOS token) per example.
    """
    eos = tokenizer.eos_token
    rows = zip(
        data["keywords"],
        data["Question"],
        data["Complex_CoT"],
        data["Response"],
    )
    return {
        "text": [
            train_prompt_style.format(keyword, question, cot, answer) + eos
            for keyword, question, cot, answer in rows
        ],
    }

# Read GAOKAO exam data from every JSON file under a root folder
def read_GAOKAO_data(root_folder):
    """Recursively load GAOKAO JSON files and flatten them into records.

    Each JSON file is expected to hold a top-level "keywords" value and an
    "example" list whose items carry "question", "answer" and "analysis".

    Args:
        root_folder: directory walked recursively for ``*.json`` files.

    Returns:
        list[dict]: records with keys "keywords", "Question", "Complex_CoT"
        and "Response" (the answer normalized to a single string).
    """
    train_data = []
    for foldername, _, filenames in os.walk(root_folder):
        for filename in filenames:
            if not filename.endswith('.json'):  # only process JSON files
                continue
            file_path = os.path.join(foldername, filename)
            with open(file_path, 'r', encoding='utf-8') as file:
                data_dict = json.load(file)  # parse directly from the handle
            keywords = data_dict["keywords"]
            for example in data_dict["example"]:
                # "answer" is usually a list; join it into one string.
                # Guard against it already being a plain string — joining a
                # str with ", " would insert commas between every character.
                answer = example["answer"]
                if not isinstance(answer, str):
                    answer = ", ".join(str(item) for item in answer)
                train_data.append({
                    "keywords": keywords,
                    "Question": example["question"],
                    "Complex_CoT": example["analysis"],
                    "Response": answer,
                })
    return train_data

# Build the training dataset from the raw GAOKAO files
def get_train_data(root_folder):
    """Load GAOKAO data and return a formatted ``datasets.Dataset``.

    Args:
        root_folder: directory passed to ``read_GAOKAO_data``.

    Returns:
        Dataset with a "text" column added by ``formatting_prompts_func``.

    Raises:
        ValueError: if no training records were found under ``root_folder``
            (instead of the obscure IndexError ``records[0]`` would raise).
    """
    records = read_GAOKAO_data(root_folder)
    if not records:
        raise ValueError(f"No GAOKAO JSON records found under {root_folder!r}")
    # Convert list-of-dicts into columnar dict-of-lists for Dataset.from_dict
    data_dict = {key: [item[key] for item in records] for key in records[0]}
    dataset = Dataset.from_dict(data_dict)
    return dataset.map(formatting_prompts_func, batched=True)

# Inference prompt template. Placeholders: subject keywords, question, and a
# (normally empty) reasoning seed. The <think> tag is deliberately left open
# so the model continues generating the reasoning itself. The Chinese text is
# part of the runtime prompt and must not be altered.
prompt_style = """
    ### 提示:
    你是一个对于解答高考题目有丰富经验的专家，现在有人问你关于{}。
    请回答下面的问题，在回答问题之前请给出逐步的推理过程。

    ### 问题：
    {}

    ### 回答：
    <think>{}
    """

# Load the fine-tuned model and its tokenizer
model_path = "Ds_Llama8B_GAOKAO"
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_path,
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    device_map={"": device},  # place all parameters on the selected device
)

# Load the training data
train_data = get_train_data("GAOKAO")

# Consistency check: does the fine-tuned model reproduce training answers?
num_samples = 5  # number of samples to verify
correct_count = 0

# Switch the model to inference mode ONCE — this call is loop-invariant, and
# the original re-enabled it on every iteration for no benefit.
FastLanguageModel.for_inference(model)

for _ in range(num_samples):
    # Pick a random training sample (indices may repeat across iterations)
    random_index = random.randint(0, len(train_data) - 1)
    sample = train_data[random_index]

    # Extract the question, its subject keywords, and the reference answer
    question = sample["Question"]
    questype = sample["keywords"]
    train_answer = sample["Response"]

    # Build model input; the empty third slot lets the model write the
    # reasoning after the open <think> tag itself
    inputs = tokenizer([prompt_style.format(questype, question, "")], return_tensors="pt").to(device)

    # Generate a response
    outputs = model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=1200,
        use_cache=True,
    )
    # Keep only the text after the answer marker (drops the echoed prompt)
    response = tokenizer.batch_decode(outputs)[0].split("### 回答：")[1]

    # Substring check: the training answer should appear in the generation
    if train_answer in response:
        correct_count += 1
        result = "一致"
    else:
        result = "不一致"

    print(f"样本索引: {random_index}")
    print(f"训练数据中的答案: {train_answer}")
    print(f"模型输出的答案: {response}")
    print(f"答案一致性检查结果: {result}")
    print("-" * 50)

# Accuracy over the sampled subset
accuracy = correct_count / num_samples
print(f"在 {num_samples} 个训练样本上的准确率: {accuracy * 100:.2f}%")