# 导入必要的库
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments, DataCollatorForLanguageModeling
from datasets import load_dataset, Dataset
import json
import torch
import os
from peft import get_peft_model, LoraConfig, TaskType

# NOTE(review): left disabled — enable to reduce CUDA memory fragmentation on OOM.
# os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Load the pretrained DeepSeek-R1-Distill-Qwen-1.5B model and its tokenizer
# from a local directory (hard-coded Windows path; adjust per machine).
# model_name = r"D:\share\python\python_net\deepseek\results\cp"
model_name = r"D:\lmStudioModel\gitModel\DeepSeek-R1-Distill-Qwen-1.5B"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Load the SFT dataset: a JSON file expected to hold a list of records with
# "Question", "Complex_CoT", and "Response" keys (consumed by preprocess_function).
with open("data/medical_o1_sft_Chinese.json", "r", encoding="utf-8") as f:
    train_data = json.load(f)

# Convert the raw list of dicts into a Hugging Face Dataset.
dataset = Dataset.from_list(train_data)


# 数据预处理函数
# Data preprocessing function
def preprocess_function(examples):
    """Tokenize a batch of SFT examples into model inputs with labels.

    Builds a DeepSeek-R1 chat-formatted prompt from each record's
    "Question", "Complex_CoT", and "Response" fields, tokenizes to a fixed
    length of 512, and attaches ``labels`` for causal-LM training.

    Note: causal LMs in ``transformers`` shift labels internally when
    computing the loss, so ``labels`` must be a position-aligned copy of
    ``input_ids`` — NOT manually shifted. The previous manual left-shift
    double-shifted, training the model to predict two tokens ahead.
    Padding positions are masked to -100 so they are ignored by the loss.
    """
    prompts = examples["Question"]
    thoughts = examples["Complex_CoT"]
    responses = examples["Response"]
    inputs = [
        f"<｜begin▁of▁sentence｜><｜User｜>{prompt}<｜Assistant｜><think>{think}</think>\n{response}"
        for prompt, think, response in zip(prompts, thoughts, responses)
    ]
    # Plain Python lists — Dataset.map stores lists anyway, so
    # return_tensors="pt" was pointless here.
    model_inputs = tokenizer(inputs, max_length=512, truncation=True, padding="max_length")

    # Labels = input_ids with every padding token replaced by -100
    # (the ignore index of the cross-entropy loss).
    pad_id = tokenizer.pad_token_id
    model_inputs["labels"] = [
        [-100 if token_id == pad_id else token_id for token_id in ids]
        for ids in model_inputs["input_ids"]
    ]
    return model_inputs


# Apply preprocessing; drop the raw string columns so only tokenized fields
# remain (string columns would break the data collator's tensor batching).
tokenized_datasets = dataset.map(
    preprocess_function, batched=True, remove_columns=dataset.column_names
)

# Split into train/eval sets (80/20); a fixed seed makes the split reproducible.
train_test_split = tokenized_datasets.train_test_split(test_size=0.2, seed=42)

# LoRA configuration: rank-8 adapters on the attention query/value projections.
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,  # rank of the low-rank update matrices
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"]  # adjust to match the model architecture
)

# Wrap the base model with LoRA adapters (only adapter weights are trained).
model = get_peft_model(model, peft_config)

# Training hyperparameters.
training_args = TrainingArguments(
    output_dir="./results",
    eval_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=2,  # effective train batch = 4 * 2 = 8 per device
    fp16=True,  # mixed-precision training (requires a CUDA device)
    num_train_epochs=3,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=10,
    save_steps=50,
    save_total_limit=2,
    report_to="tensorboard",  # enable TensorBoard logging
)

# Causal-LM collator (mlm=False). NOTE(review): this collator rebuilds
# `labels` from `input_ids` (masking pads to -100), so it supersedes any
# labels produced during preprocessing.
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm=False
)

# Initialize the Trainer. BUG FIX: Dataset.train_test_split() returns the
# keys "train" and "test" — the previous "model_test" key raised KeyError.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_test_split["train"],
    eval_dataset=train_test_split["test"],
    data_collator=data_collator
)

# Run fine-tuning, then release cached GPU memory.
trainer.train()
torch.cuda.empty_cache()

# Save the LoRA adapter weights and the tokenizer.
model.save_pretrained("./fine-tuned-qwen-1.5b")
tokenizer.save_pretrained("./fine-tuned-qwen-1.5b")
