# -*- coding: utf-8 -*-
# @Time    : 2025/2/10 10:54
# @Author  :
# @File    : train_lora.py
# @Software: PyCharm
# @Comment : Fine-tune a model with LoRA

import datasets
import swanlab
import torch
from peft import LoraConfig, TaskType, get_peft_model
from swanlab.integration.transformers import SwanLabCallback
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, \
    DataCollatorForSeq2Seq

from config import Config
from utils import qwen_format, init_distributed_mode


def _build_peft_model(model_pretrained):
    """Wrap *model_pretrained* with trainable LoRA adapters for causal LM."""
    peft_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        inference_mode=False,  # training mode: adapter weights are trainable
        target_modules=Config.target_modules,
        r=Config.lora_rank,
        lora_alpha=Config.lora_alpha,
        lora_dropout=Config.lora_dropout
    )
    return get_peft_model(model_pretrained, peft_config)


def _build_training_args():
    """Build the HuggingFace TrainingArguments from the project Config."""
    return TrainingArguments(
        output_dir=Config.lora_model_path,
        per_device_train_batch_size=Config.batch_size,
        eval_strategy="steps",  # evaluate every `eval_steps` ("no" | "steps" | "epoch")
        learning_rate=Config.learning_rate,  # initial LR for the (AdamW-family) optimizer
        lr_scheduler_type="cosine",  # LR schedule (defaults to "linear")
        gradient_accumulation_steps=Config.gradient_accumulation_steps,
        # NOTE: accumulation affects how logging_steps / eval_steps are counted
        num_train_epochs=Config.epochs,
        eval_steps=Config.eval_step,
        logging_steps=Config.logging_steps,
        save_steps=Config.save_step,
        load_best_model_at_end=True,  # requires eval + save strategies/steps to align
        optim="paged_adamw_32bit",
        gradient_checkpointing=True,
        # use_reentrant=False is required for gradient checkpointing with PEFT/DDP
        gradient_checkpointing_kwargs={"use_reentrant": False},
        ddp_find_unused_parameters=False,  # must be False with gradient checkpointing
        fp16=Config.torch_dtype == torch.float16,  # enable fp16 mixed precision
        bf16=Config.torch_dtype == torch.bfloat16,  # enable bf16 mixed precision
        dataloader_num_workers=8,
    )


def _load_datasets(tokenizer_pretrained):
    """Load the processed dataset from disk and tokenize both splits.

    Returns:
        (train_dataset, eval_dataset) tokenized with ``qwen_format``; the
        original raw columns are dropped after mapping.
    """
    dataset = datasets.load_from_disk(Config.processed_datasets_path)
    train_dataset = dataset["train"].map(
        qwen_format,
        fn_kwargs={"tokenizer": tokenizer_pretrained},
        remove_columns=dataset["train"].column_names,
        num_proc=8,
    )
    eval_dataset = dataset["test"].map(
        qwen_format,
        fn_kwargs={"tokenizer": tokenizer_pretrained},
        remove_columns=dataset["test"].column_names,
        num_proc=8,
    )
    return train_dataset, eval_dataset


def _build_swanlab_callback():
    """Log in to SwanLab and create the experiment-tracking callback."""
    swanlab.login(api_key=Config.swanlab_api_key)
    return SwanLabCallback(
        workspace="fvtao",
        project="VeriCoder",
        experiment_name="qwen-7b-lora",
        config=dict(Config.__dict__)  # record all config values with the run
    )


def lora(tokenizer_pretrained, model_pretrained):
    """Fine-tune *model_pretrained* with LoRA and save the adapter weights.

    Args:
        tokenizer_pretrained: tokenizer matching the pretrained model; used
            both for dataset tokenization and for padding in the collator.
        model_pretrained: the base causal-LM model to adapt.

    Side effects:
        Trains via ``Trainer`` and writes the PEFT adapter to
        ``Config.lora_model_path``; logs metrics to SwanLab.
    """
    peft_model = _build_peft_model(model_pretrained)
    train_args = _build_training_args()
    train_dataset, eval_dataset = _load_datasets(tokenizer_pretrained)
    swanlab_callback = _build_swanlab_callback()

    # Dynamic padding per batch; DataCollatorForSeq2Seq also pads `labels`.
    data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer_pretrained, padding=True)

    trainer = Trainer(
        model=peft_model,
        args=train_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
        callbacks=[swanlab_callback],
    )

    trainer.train()

    # Save only the LoRA adapter (not the full base model).
    peft_model.save_pretrained(Config.lora_model_path)


if __name__ == '__main__':
    # Initialize the distributed (DDP) environment; `device` is this rank's device.
    _, ddp_local_rank, _, device = init_distributed_mode()

    # Load the pretrained tokenizer.
    tokenizer = AutoTokenizer.from_pretrained(
        Config.pretrained_model_path,
        use_fast=False,
        trust_remote_code=True
    )

    # Load the pretrained base model.
    pretrained_model = AutoModelForCausalLM.from_pretrained(
        pretrained_model_name_or_path=Config.pretrained_model_path,
        trust_remote_code=True,
        torch_dtype=Config.torch_dtype,
        use_cache=False,  # KV cache is incompatible with gradient checkpointing
        device_map=device,
    )
    # BUG FIX: enable_input_require_grads() mutates the model in place and
    # returns None, so chaining it onto from_pretrained() bound
    # `pretrained_model` to None. Call it on its own line instead.
    # (Needed so gradient checkpointing works with a frozen base model.)
    pretrained_model.enable_input_require_grads()

    lora(tokenizer, pretrained_model)
