import json
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments
from trl import RewardTrainer
from datasets import load_dataset, Dataset
import torch

# Step 1: load the pretrained policy model and its tokenizer.
model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Some DeepSeek tokenizers ship without a pad token, which the
# padding="max_length" call in preprocess_function requires; fall back to EOS.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Step 2: load the reward model (replace with your own reward model name).
reward_model_name = "deepseek-ai/reward-model"
reward_model = AutoModelForSequenceClassification.from_pretrained(
    reward_model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)

# Step 3: load the training data from a local JSON file
# (a list of records with "Question", "Complex_CoT" and "Response" keys —
# see preprocess_function) and wrap it as a Hugging Face Dataset.
with open("data/medical_o1_sft_Chinese.json", "r", encoding="utf-8") as f:
    train_data = json.load(f)

train_dataset = Dataset.from_list(train_data)

# 步骤4：定义数据预处理函数
def preprocess_function(examples):
    """Tokenize a batch of (Question, Complex_CoT, Response) examples.

    Builds the DeepSeek chat-format text with the chain of thought wrapped in
    <think> tags, then tokenizes each sample to a fixed length of 4096 tokens.

    Args:
        examples: batched dict from ``Dataset.map(batched=True)`` with keys
            "Question", "Complex_CoT" and "Response" (parallel lists of str).

    Returns:
        Tokenizer output (``input_ids``/``attention_mask``) as plain lists,
        which ``Dataset.map`` stores column-wise.
    """
    prompts = examples["Question"]
    cots = examples["Complex_CoT"]  # renamed: was shadowed by the loop variable
    responses = examples["Response"]
    texts = [
        f"<｜begin▁of▁sentence｜><｜User｜>{prompt}<｜Assistant｜><think>{cot}</think>\n{response}"
        for prompt, cot, response in zip(prompts, cots, responses)
    ]
    # Do NOT pass return_tensors="pt" here: Dataset.map(batched=True) stores
    # the output as arrow-backed lists, so returning torch tensors only forces
    # a useless tensor->list round trip and can fail on ragged batches.
    return tokenizer(texts, max_length=4096, truncation=True, padding="max_length")


# Step 4 (cont.): tokenize the whole dataset in batches with the function above.
train_dataset = train_dataset.map(preprocess_function, batched=True)

# Step 5: configure the training arguments.
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    save_steps=1000,
    save_total_limit=2,
    logging_dir="./logs",
    logging_steps=10,
    learning_rate=2e-5,
    fp16=True,
    gradient_accumulation_steps=2,
    # Evaluation is intentionally disabled: the trainer in this script gets no
    # eval_dataset. The previous settings (evaluation_strategy="epoch",
    # eval_steps=100, load_best_model_at_end=True,
    # metric_for_best_model="eval_loss") made Trainer raise at init:
    # load_best_model_at_end requires the save strategy to match the eval
    # strategy, and epoch-based evaluation requires an eval dataset
    # (eval_steps is also ignored under an epoch strategy). To re-enable,
    # supply an eval split and set together:
    #   evaluation_strategy="epoch", save_strategy="epoch",
    #   load_best_model_at_end=True, metric_for_best_model="eval_loss",
)

# Step 6: initialize the RewardTrainer.
# NOTE(review): trl's RewardTrainer has no `reward_model` keyword — passing
# one raises TypeError, so it has been removed here. RewardTrainer *trains* a
# reward model from preference pairs; using a separate frozen reward model to
# score generations is a PPO/RLHF concept (PPOTrainer), not RewardTrainer.
# NOTE(review): RewardTrainer expects paired columns (input_ids_chosen /
# attention_mask_chosen / input_ids_rejected / attention_mask_rejected), while
# the dataset built above only has input_ids/attention_mask — confirm whether
# SFTTrainer was the intended trainer for this single-response data.
trainer = RewardTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)

# Step 7: run training.
trainer.train()

# Step 8: save the fine-tuned model and tokenizer into the same directory so
# it can be reloaded later with from_pretrained("./fine_tuned_model").
model.save_pretrained("./fine_tuned_model")
tokenizer.save_pretrained("./fine_tuned_model")