# -*- coding: utf-8 -*-
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
import config
from model.ERNIE import ERNIEFineTuner
from evaluates.Metrics import MetricsComputer
from src.sftdata import SFTDataProcessor
from peft import LoraConfig, get_peft_model, TaskType
import os

class SFTModelLoader:
    """Loads a causal-LM model and its tokenizer for SFT, optionally 4-bit quantized (QLoRA)."""

    def __init__(self, model_name):
        """
        Args:
            model_name: Hugging Face model id or local path passed to ``from_pretrained``.
        """
        self.model_name = model_name

    def load_tokenizer(self):
        """Load the tokenizer; fall back to EOS as the pad token when none is defined."""
        print(f"正在加载Tokenizer: {self.model_name}")
        tokenizer = AutoTokenizer.from_pretrained(self.model_name, trust_remote_code=True)
        if tokenizer.pad_token is None:
            # Many causal-LM tokenizers ship without a pad token; reuse EOS so batching works.
            tokenizer.pad_token = tokenizer.eos_token
        return tokenizer

    def load_model(self, use_4bit_quant=True):
        """Load the causal LM, optionally with NF4 4-bit quantization for QLoRA.

        Args:
            use_4bit_quant: when True, load weights in 4-bit NF4 with bfloat16 compute.

        Returns:
            The loaded model, with KV-cache and pretraining tensor-parallelism disabled
            for training.
        """
        print(f"正在加载模型: {self.model_name}")
        bnb_config = None
        if use_4bit_quant:
            print("已启用4位量化 (QLoRA)。")
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_quant_type="nf4",
                # Direct attribute access; getattr(torch, "bfloat16") was an
                # unnecessary indirection for a fixed attribute name.
                bnb_4bit_compute_dtype=torch.bfloat16,
                bnb_4bit_use_double_quant=False,
            )
        model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            quantization_config=bnb_config,
            device_map="auto",
            trust_remote_code=True,
        )
        model.config.use_cache = False   # generation cache is not used during training
        model.config.pretraining_tp = 1  # disable tensor-parallel pretraining path
        return model


class TrainingPipeline:
    """End-to-end QLoRA SFT pipeline: load model, apply LoRA, prepare data, train, save."""

    def __init__(self, model_name, train_dataset_path, eval_dataset_path, config):
        """
        Args:
            model_name: base model id/path forwarded to SFTModelLoader.
            train_dataset_path: path to the training dataset.
            eval_dataset_path: path to the evaluation dataset.
            config: dict with keys 'output_dir_base', 'learning_rate',
                'batch_size', 'num_samples', and 'epochs'.
        """
        self.model_name = model_name
        self.train_dataset_path = train_dataset_path
        self.eval_dataset_path = eval_dataset_path
        self.config = config
        # Encode the run's hyper-parameters into the output directory name for traceability.
        self.output_dir = f"{config['output_dir_base']}/lr_{config['learning_rate']}_bs_{config['batch_size']}_samples_{config['num_samples']}"

    def run(self):
        """Execute the full fine-tuning pipeline.

        Returns:
            tuple: (train_result, eval_metrics) where eval_metrics is a dict of
            zeroed precision/recall/f1 placeholders (evaluation is skipped here).
        """
        # 1. Load model and tokenizer.
        model_loader = SFTModelLoader(self.model_name)
        tokenizer = model_loader.load_tokenizer()
        model = model_loader.load_model(use_4bit_quant=True)

        # 2. Configure and apply LoRA to all attention and MLP projections
        #    (wider target set to increase adapter capacity).
        print("正在配置LoRA...")
        # NOTE(review): `config` below is the imported *module* (`import config`),
        # not the `self.config` dict used everywhere else in this class — the
        # `config` parameter of __init__ shadows the module only inside __init__.
        # Confirm this split source of configuration is intentional.
        lora_config = LoraConfig(
            r=config.lora_config.r,
            lora_alpha=config.lora_config.lora_alpha,
            target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
            lora_dropout=config.lora_config.lora_dropout,
            bias="none",
            task_type=TaskType.CAUSAL_LM,
        )
        model = get_peft_model(model, lora_config)
        model.print_trainable_parameters()

        # 3. Load and preprocess the datasets (training set may be subsampled).
        data_processor = SFTDataProcessor(tokenizer)
        train_dataset = data_processor.load_and_prepare_dataset(
            self.train_dataset_path, num_samples=self.config['num_samples'])
        # Evaluation uses the full dataset (no num_samples cap).
        eval_dataset = data_processor.load_and_prepare_dataset(self.eval_dataset_path)

        # 4. Training arguments.
        training_arguments = TrainingArguments(
            output_dir=self.output_dir,
            per_device_train_batch_size=self.config['batch_size'],
            # Halve the eval batch to reduce peak memory, but clamp to >= 1 so a
            # train batch_size of 1 does not produce an invalid size of 0.
            per_device_eval_batch_size=max(1, self.config['batch_size'] // 2),
            gradient_accumulation_steps=8,
            optim="paged_adamw_32bit",
            save_strategy="no",
            logging_steps=5,
            learning_rate=self.config['learning_rate'],
            # NOTE(review): fp16 mixed precision while the 4-bit compute dtype is
            # bfloat16 (see SFTModelLoader) — consider bf16=True on bf16-capable GPUs.
            fp16=True,
            max_grad_norm=0.3,
            num_train_epochs=self.config['epochs'],
            warmup_ratio=0.03,
            group_by_length=True,
            lr_scheduler_type="constant",
            dataloader_pin_memory=False,  # lowers host-memory pressure
        )

        # 5. Build the metrics computer.
        metrics_computer = MetricsComputer(tokenizer)

        # 6. Fine-tune.
        fine_tuner = ERNIEFineTuner(model, tokenizer, train_dataset, eval_dataset,
                                    metrics_computer, training_arguments)
        train_result = fine_tuner.train()

        # 7. Persist the trained model and tokenizer.
        self._save_model(fine_tuner, tokenizer)

        # Placeholder metrics: this pipeline skips evaluation, so callers receive
        # zeroed values with the expected keys.
        empty_eval_metrics = {
            "eval_precision": 0.0,
            "eval_recall": 0.0,
            "eval_f1": 0.0,
        }
        return train_result, empty_eval_metrics

    def _save_model(self, fine_tuner, tokenizer):
        """Save the trained model and tokenizer under <output_dir>/final_model."""
        model_save_path = os.path.join(self.output_dir, "final_model")
        os.makedirs(model_save_path, exist_ok=True)

        print(f"正在保存模型到: {model_save_path}")
        fine_tuner.trainer.model.save_pretrained(model_save_path)
        tokenizer.save_pretrained(model_save_path)
        print("模型保存完成！")
