# -*- coding: utf-8 -*-
# @Time    : 2025/2/10 10:54
# @Author  : 
# @File    : train_qlora.py
# @Software: PyCharm 
# @Comment : 用 qlora 微调模型

from transformers import BitsAndBytesConfig, AutoModelForCausalLM, AutoTokenizer

from config import Config
from train_lora import lora
from utils import init_distributed_mode

if __name__ == '__main__':
    # Initialize the distributed environment; we only need the local rank's
    # device string here to place the quantized model.
    _, ddp_local_rank, _, DEVICE = init_distributed_mode()

    # 4-bit quantization config for QLoRA: NF4 quant type with double
    # quantization, computing in the dtype configured for the project.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit = True,
        bnb_4bit_use_double_quant = True,
        bnb_4bit_quant_type = "nf4",
        bnb_4bit_compute_dtype = Config.torch_dtype,
    )

    # Load the tokenizer of the pretrained base model.
    tokenizer = AutoTokenizer.from_pretrained(
        Config.pretrained_model_path,
        use_fast=False,
        trust_remote_code=True
    )

    print("当前 GPU 设备: " + DEVICE)

    # Load the base model quantized to 4 bits on this rank's device.
    # use_cache=False because gradient checkpointing / training is
    # incompatible with the KV cache.
    pretrained_model = AutoModelForCausalLM.from_pretrained(
        pretrained_model_name_or_path = Config.pretrained_model_path,
        quantization_config = bnb_config,
        low_cpu_mem_usage = True,
        trust_remote_code = True,
        torch_dtype = Config.torch_dtype,
        use_cache=False,
        device_map=DEVICE,
    )
    # BUG FIX: enable_input_require_grads() returns None, so chaining it
    # onto from_pretrained() bound `pretrained_model` to None and passed
    # None into lora(). Call it as a standalone statement instead; it
    # makes the (frozen, quantized) model's input embeddings require
    # grads so gradients can flow to the LoRA adapters.
    pretrained_model.enable_input_require_grads()

    lora(tokenizer, pretrained_model)
