import json
import os
from datasets import Dataset
import wandb
import torch 
# Sanity prints: confirm the torch build and that CUDA is visible.
print(torch.__version__)
print(torch.cuda.is_available())
from unsloth import FastLanguageModel
# Model configuration
max_seq_length = 2048 # maximum sequence length
dtype = None  # None lets unsloth auto-select the dtype
load_in_4bit = False  # full-precision load (no 4-bit quantization)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# DeepSeek-R1-Distill-Llama-8B is better suited to English
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "./DeepSeek-R1-Distill-Llama-8B",  # local checkpoint directory
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    device_map={"": device},  # load all parameters onto the chosen device
)

print(model)
print("tokenizer:", tokenizer)
# Fetch the end-of-sequence token (appended to every training example below)
eos_token = tokenizer.eos_token
print(f"EOS Token: {eos_token}")

train_prompt_style = """
    ### 提示:
    你是一个对于解答高考题目有丰富经验的专家，现在有人问你关于{}。
    请回答下面的问题，在回答问题之前请给出逐步的推理过程。

    ### 问题：
    {}

    ### 回答：
    <think>
    {}
    </think>
    <answer>
    {}
    </answer>
    """
def formatting_prompts_func(data):
    """Batched map function: render each example into the training prompt.

    *data* is a columnar batch containing "keywords", "Question",
    "Complex_CoT" and "Response" lists. Returns a new "text" column where
    each entry is the filled-in template followed by the tokenizer's EOS
    token, so generation learns to stop.
    """
    eos = tokenizer.eos_token
    columns = zip(
        data["keywords"],
        data["Question"],
        data["Complex_CoT"],
        data["Response"],
    )
    return {
        "text": [
            train_prompt_style.format(kw, question, cot, answer) + eos
            for kw, question, cot, answer in columns
        ],
    }
def read_GAOKAO_data(root_folder):
    """Recursively read GAOKAO exam JSON files under *root_folder*.

    Each ``.json`` file is expected to hold a ``"keywords"`` field and an
    ``"example"`` list whose items have ``"question"``, ``"answer"`` and
    ``"analysis"`` keys (assumption based on the access pattern — confirm
    against the data files).

    Returns a list of dicts with the four training fields:
    ``'keywords'``, ``'Question'``, ``'Complex_CoT'``, ``'Response'``.
    """
    train_data = []
    for foldername, _, filenames in os.walk(root_folder):
        for filename in filenames:
            if not filename.endswith('.json'):  # only process JSON files
                continue
            file_path = os.path.join(foldername, filename)
            with open(file_path, 'r', encoding='utf-8') as file:
                data_dict = json.load(file)  # parse straight from the handle
            keywords = data_dict["keywords"]
            for example in data_dict["example"]:
                ans = example["answer"]
                # "answer" is usually a list — join it into one string.
                # Guard against a bare string (joining a str would split it
                # into characters) and non-string items (e.g. numbers).
                if isinstance(ans, str):
                    response = ans
                else:
                    response = ", ".join(str(a) for a in ans)
                train_data.append({
                    "keywords": keywords,
                    "Question": example["question"],
                    "Complex_CoT": example["analysis"],
                    "Response": response,
                })
    return train_data
# (leftover marker) quick functional test of the reader above


# Wrap data reading and preprocessing into a single function
def get_train_data(root_folder):
    """Read GAOKAO JSON files under *root_folder* and return a Hugging Face
    ``Dataset`` with a formatted ``"text"`` column ready for SFT training.

    Raises ``ValueError`` if no examples are found.
    """
    # BUG FIX: the parameter used to be overwritten by a hard-coded
    # "GAOKAO" path, silently ignoring the caller's argument.
    train_data = read_GAOKAO_data(root_folder)
    if not train_data:
        raise ValueError(f"No training examples found under {root_folder!r}")
    # Convert list-of-dicts to the dict-of-lists shape Dataset.from_dict expects.
    data_dict = {key: [item[key] for item in train_data] for key in train_data[0]}
    dataset = Dataset.from_dict(data_dict)
    # Render every example into the training prompt template (adds EOS).
    return dataset.map(formatting_prompts_func, batched=True)

# Build the training dataset from the local "GAOKAO" data folder.
train_data = get_train_data("GAOKAO")

from trl import SFTTrainer
from transformers import TrainingArguments
from unsloth import is_bfloat16_supported

# Wrap the base model with LoRA adapters (PEFT): only the low-rank adapter
# weights are trained, keeping memory usage manageable.
model = FastLanguageModel.get_peft_model(
    model,
    r=16,  # LoRA rank
    target_modules=[  # attention and MLP projection layers to adapt
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ],
    lora_alpha=16,  # LoRA scaling factor (alpha/r = 1.0)
    lora_dropout=0,  # no dropout on adapter layers
    bias="none",  # bias terms are not trained
    use_gradient_checkpointing="unsloth",  # True or "unsloth" for very long context
    random_state=1290,
    use_rslora=False,  # plain LoRA scaling, not rank-stabilized
    loftq_config=None,
)
# Supervised fine-tuning trainer (TRL) over the formatted "text" column.
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=train_data,
    dataset_text_field="text",  # column produced by formatting_prompts_func
    max_seq_length=max_seq_length,
    dataset_num_proc=2,  # parallel workers for dataset tokenization
    args=TrainingArguments(
        per_device_train_batch_size=2,
        num_train_epochs=3,
        gradient_accumulation_steps=4,  # effective batch size per device = 2 * 4
        # NOTE(review): upstream examples suggest num_train_epochs=1 with
        # warmup_ratio for full runs; this config uses 3 epochs + fixed warmup.
        warmup_steps=4,
        learning_rate=2e-4,
        # Prefer bf16 where the hardware supports it, otherwise fp16.
        fp16=not is_bfloat16_supported(),
        bf16=is_bfloat16_supported(),
        logging_steps=10,
        optim="adamw_8bit",  # memory-efficient 8-bit AdamW
        weight_decay=0.01,
        lr_scheduler_type="linear",
        seed=1291,
        output_dir="outputs",
    ),
)
# SECURITY FIX: a W&B API key was hard-coded here — any key that was ever
# committed must be considered leaked and revoked. Read credentials from the
# environment instead (set WANDB_API_KEY, or run `wandb login` once on the
# machine); wandb.login falls back to its other sources when key is None.
wandb.login(key=os.environ.get("WANDB_API_KEY"))
# Run fine-tuning; trainer.train() returns the training statistics.
trainer_stats = trainer.train()
prompt_style = """
    ### 提示:
    你是一个对于解答高考题目有丰富经验的专家，现在有人问你关于{}。
    请回答下面的问题，在回答问题之前请给出逐步的推理过程。

    ### 问题：
    {}

    ### 回答：
    <think>{}
    """
# Sample evaluation item: a multiple-choice calculus question (tangent line)
# and its category keyword, used to fill the inference prompt below.
question = "3. (5 分) 曲线 $y=\\frac{x}{x+2}$ 在点 $(-1,-1)$ 处的切线方程为（ $）$\nA. $y=2 x+1$\nB. $y=2 x-1$\nC. $y=-2 x-3$\nD. $y=-2 x-2$\n"
questype = "2010-2022_Math_I_MCQs"

# Switch the model to Unsloth's optimized inference mode.
FastLanguageModel.for_inference(model)  # Unsloth has 2x faster inference!
# CONSISTENCY FIX: use the `device` chosen at startup (cuda with CPU fallback)
# instead of the hard-coded "cuda", which crashed on CPU-only hosts.
inputs = tokenizer([prompt_style.format(questype, question, "")], return_tensors="pt").to(device)

outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=1200,
    use_cache=True,
)
response = tokenizer.batch_decode(outputs)
# Print only the generated part: everything after the answer header.
print(response[0].split("### 回答：")[1])

# Persist the LoRA-adapted model locally.
model_save = "Ds_Llama8B_GAOKAO"
model.save_pretrained(model_save)

