import json
import os
from datasets import Dataset
import wandb  # NOTE(review): not used anywhere in this chunk — may be used further down the file, otherwise removable
import torch 
# Quick environment sanity check: torch version and CUDA availability.
print(torch.__version__)
print(torch.cuda.is_available())
from unsloth import FastLanguageModel
# Model loading configuration.
max_seq_length = 2048 # maximum sequence length
dtype = None  # presumably auto-selected by unsloth when None — TODO confirm
load_in_4bit = False  # full-precision weights; no 4-bit quantization
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# DeepSeek-R1-Distill-Llama-8B is better suited to English.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "./deepseek-r1:8b-unsloth",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    device_map={"": device},  # load all parameters onto the chosen device
)

print(model)
print("tokenizer:", tokenizer)
# Grab the end-of-sequence token so prompts can be terminated explicitly.
eos_token = tokenizer.eos_token
print(f"EOS Token: {eos_token}")

# Training prompt template. Placeholders are filled in order:
# keywords, question, chain-of-thought, final answer.
# NOTE(review): the template body is indented inside the triple-quoted
# string, so that leading whitespace becomes part of every rendered
# training example — confirm this is intentional.
train_prompt_style = """
    ### 提示:
    你是一个对于解答高考题目有丰富经验的专家，现在有人问你关于{}。
    请回答下面的问题，在回答问题之前请给出逐步的推理过程。

    ### 问题：
    {}

    ### 回答：
    <think>
    {}
    </think>
    <answer>
    {}
    </answer>
    """
def formatting_prompts_func(data):
    """Render a batch of examples into the training prompt template.

    Expects a batched mapping with parallel lists under the keys
    "keywords", "Question", "Complex_CoT" and "Response", and returns a
    dict with a single "text" list: one fully formatted prompt per
    example, each terminated with the tokenizer's EOS token.
    """
    eos = tokenizer.eos_token
    rendered = [
        train_prompt_style.format(keyword, question, cot, answer) + eos
        for keyword, question, cot, answer in zip(
            data["keywords"],
            data["Question"],
            data["Complex_CoT"],
            data["Response"],
        )
    ]
    return {"text": rendered}
def read_GAOKAO_data(root_folder):
    """Recursively load GAOKAO exam JSON files into flat training records.

    Walks ``root_folder`` and parses every ``*.json`` file. Each file is
    expected to contain a ``"keywords"`` value and an ``"example"`` list
    whose items carry ``"question"``, ``"answer"`` and ``"analysis"``
    fields (``"answer"`` is normally a list of strings).

    Args:
        root_folder: path of the directory tree to scan.

    Returns:
        list[dict]: one record per example with the keys ``"keywords"``,
        ``"Question"``, ``"Complex_CoT"`` and ``"Response"``.
    """
    train_data = []
    for foldername, _, filenames in os.walk(root_folder):
        # Sort so the record order is deterministic across runs and
        # filesystems (os.walk's filename order is unspecified).
        for filename in sorted(filenames):
            if not filename.endswith('.json'):  # only process JSON files
                continue
            file_path = os.path.join(foldername, filename)
            with open(file_path, 'r', encoding='utf-8') as file:
                # Parse straight from the stream instead of read()+loads.
                data_dict = json.load(file)
            keywords = data_dict["keywords"]
            for example in data_dict["example"]:
                ans = example["answer"]
                # "answer" is normally a list of strings; joining a plain
                # string would splice ", " between its characters, so only
                # join genuine sequences.
                if not isinstance(ans, str):
                    ans = ", ".join(ans)
                train_data.append({
                    "keywords": keywords,
                    "Question": example["question"],
                    "Complex_CoT": example["analysis"],
                    "Response": ans,
                })
    return train_data
# Smoke-test the pipeline: load the raw records, wrap them in a Hugging
# Face Dataset, then render the training prompts.

root_folder = "GAOKAO"  # replace with your own root data folder path
train_data = read_GAOKAO_data(root_folder)
print("first complex cot:", train_data[0]["Complex_CoT"])

# Transpose the list of record dicts into per-column lists so the data
# can be handed to Dataset.from_dict and its processing utilities.
columns = train_data[0].keys()
data_dict = {column: [record[column] for record in train_data] for column in columns}
train_data = Dataset.from_dict(data_dict)

# Render every example into its final prompt text (batched mapping).
train_data = train_data.map(formatting_prompts_func, batched=True)

print("first train data text:", train_data[0]["text"])