import torch
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, AutoModel
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer, DataCollatorForCompletionOnlyLM                       
import accelerate

# Pull the train split of the Chinese cybersecurity mixed instruction corpus
# from the Hugging Face Hub (network access required on first run).
dataset = load_dataset(
    "qingmian/CyberSecurity-Chinese-Mixed-V2",
    split="train",
)

# Download the Qwen2.5-7B base checkpoint together with its matching tokenizer.
_checkpoint = "Qwen/Qwen2.5-7B"
model = AutoModelForCausalLM.from_pretrained(_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(_checkpoint)

# model.embed_tokens.weight.requires_grad = False

# def formatting_prompts_func(example):
#     output_texts = []
#     for i in range(len(example['input'])):
#         text = f"### Question: {example['input'][i]}\n ### Answer: {example['output'][i]}"
#         output_texts.append(text)
#     return output_texts

# response_template = " ### Answer:"
# collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer)

# peft_config = LoraConfig(
#     r=16,
#     lora_alpha=16,
#     target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
#                       "gate_proj", "up_proj", "down_proj"],
#     lora_dropout=0.1,
#     bias="none",
#     modules_to_save=["lm_head", "embed_token"],
#     task_type="CAUSAL_LM",
# )

# Training hyper-parameters.
# Fix: use SFTConfig (already imported, and a TrainingArguments subclass) —
# it is the config type trl's SFTTrainer documents/expects, while the original
# passed a plain TrainingArguments. Every field below is valid on both, so
# existing behavior is preserved.
training_params = SFTConfig(
    output_dir="./tmp",  # where run artifacts / checkpoints would be written
    num_train_epochs=2,  # total epochs (effectively capped by max_steps below)
    per_device_train_batch_size=2,  # batch size per GPU
    gradient_accumulation_steps=2,  # accumulate grads -> effective batch = 2 * 2 per device
    save_strategy='no',  # skip intermediate checkpoint saving
    gradient_checkpointing=False,  # disabled; the kwargs below are inert while this is False
    gradient_checkpointing_kwargs={"use_reentrant": False},  # silences the use_reentrant warning when enabled
    optim="paged_adamw_32bit",  # paged AdamW optimizer (requires bitsandbytes at runtime)
    logging_steps=100,  # steps between training-log lines
    learning_rate=1e-8,  # NOTE(review): unusually small for SFT — confirm this is intended (smoke test?)
    weight_decay=0.001,  # weight-decay coefficient
    fp16=True,  # mixed-precision (FP16) training
    bf16=False,  # BF16 disabled
    max_steps=10,  # hard cap on optimizer steps (overrides num_train_epochs)
    warmup_ratio=0.01,  # fraction of steps used for LR warmup
    group_by_length=False,  # bucketing similar-length samples is off
    lr_scheduler_type="constant",  # constant learning-rate schedule
)

# Build the supervised fine-tuning trainer, run training, then persist
# the resulting weights and tokenizer files.
trainer = SFTTrainer(
    model,
    args=training_params,
    train_dataset=dataset,
    # formatting_func=formatting_prompts_func,
    # data_collator=collator,
    # peft_config=peft_config
)

trainer.train()

for artifact in (model, tokenizer):
    artifact.save_pretrained('./outputs')