# -*- coding: utf-8 -*-
"""
Qwen3-30B模型配置文件
支持4-bit量化和LoRA微调
"""

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments
)
from peft import LoraConfig, prepare_model_for_kbit_training, get_peft_model
import warnings
warnings.filterwarnings("ignore")

# 模型配置  
MODEL_NAME = "./models/Qwen3-4B-Instruct-2507"  # 使用本地普通版本
MODEL_CACHE_DIR = "./models"

class ModelConfig:
    """Loading and training configuration for the local Qwen3 model.

    Responsibilities:
      * device selection (CUDA > MPS > CPU)
      * optional 4-bit (NF4) quantization -- applied only on CUDA,
        since bitsandbytes has no MPS/CPU kernels
      * LoRA adapter configuration
      * ``TrainingArguments`` for the HF ``Trainer``
    """

    def __init__(self, use_4bit=True, use_lora=True):
        self.use_4bit = use_4bit
        self.use_lora = use_lora
        # Bug fix: the original probe only checked MPS, so CUDA machines
        # silently fell back to CPU. Prefer CUDA, then MPS, then CPU.
        if torch.cuda.is_available():
            self.device = "cuda"
        elif torch.backends.mps.is_available():
            self.device = "mps"
        else:
            self.device = "cpu"
        # Tracks whether the model was actually loaded in 4-bit so that
        # setup_lora_model() knows to run prepare_model_for_kbit_training.
        self._loaded_in_4bit = False

    def get_quantization_config(self):
        """Return a 4-bit NF4 ``BitsAndBytesConfig``, or ``None`` if disabled."""
        if not self.use_4bit:
            return None

        return BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )

    def get_lora_config(self):
        """Return the ``LoraConfig`` for causal-LM fine-tuning, or ``None``."""
        if not self.use_lora:
            return None

        return LoraConfig(
            r=128,                   # rank -- 64 or 256 are reasonable alternatives
            lora_alpha=256,          # scaling factor (alpha / r = 2)
            target_modules=[         # attention + MLP projection layers
                "q_proj", "k_proj",
                "v_proj", "o_proj",
                "gate_proj", "up_proj", "down_proj",
            ],
            lora_dropout=0.1,
            bias="none",
            task_type="CAUSAL_LM",
        )

    def load_model_and_tokenizer(self):
        """Load tokenizer and model from ``MODEL_NAME``.

        Bug fix: the original built a quantization config but never passed
        it to ``from_pretrained``, so ``use_4bit`` was silently ignored.
        4-bit loading is now applied when requested *and* running on CUDA
        (bitsandbytes requires CUDA); other devices load unquantized, as
        before.

        Returns:
            tuple: ``(model, tokenizer)``, with the model already moved to
            ``self.device`` where applicable.
        """
        print(f"开始加载模型: {MODEL_NAME}")
        print(f"使用设备: {self.device}")

        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            cache_dir=MODEL_CACHE_DIR,
            trust_remote_code=True,
        )

        # Causal-LM tokenizers often ship without a pad token; reuse EOS.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        print("加载模型...")

        # bitsandbytes 4-bit kernels exist only for CUDA devices.
        quant_config = self.get_quantization_config() if self.device == "cuda" else None

        load_kwargs = dict(
            cache_dir=MODEL_CACHE_DIR,
            trust_remote_code=True,
            # MPS: load fp32 weights (fp16 on MPS is flaky for this model).
            dtype=torch.float32 if self.device == "mps" else torch.float16,
        )
        if quant_config is not None:
            load_kwargs["quantization_config"] = quant_config

        try:
            model = AutoModelForCausalLM.from_pretrained(
                MODEL_NAME,
                device_map="auto" if self.device != "mps" else None,
                **load_kwargs,
            )
        except RuntimeError as e:
            if "FP8 quantization" in str(e):
                # Some checkpoints ship FP8 weights that cannot be loaded on
                # this device; retry with no device_map (CPU) and move to
                # MPS manually below.
                print("⚠️ FP8量化不支持当前设备，尝试CPU模式...")
                model = AutoModelForCausalLM.from_pretrained(
                    MODEL_NAME,
                    device_map=None,
                    **load_kwargs,
                )
            else:
                raise

        self._loaded_in_4bit = quant_config is not None

        # device_map is not used for MPS; move the whole model explicitly.
        if self.device == "mps":
            print("移动模型到MPS设备...")
            model = model.to("mps")

        # Clearer than the original conditional expression inside print().
        if torch.cuda.is_available():
            print(f"模型加载成功! 内存使用: {torch.cuda.memory_allocated() / 1024**3:.2f}GB")
        else:
            print("模型加载成功!")

        return model, tokenizer

    def setup_lora_model(self, model):
        """Wrap ``model`` with LoRA adapters (no-op when LoRA is disabled).

        When the base model was loaded in 4-bit, it is first passed through
        ``prepare_model_for_kbit_training`` (casts norm layers, readies the
        model for k-bit gradient flow) -- the original imported this helper
        but never called it.
        """
        if not self.use_lora:
            return model

        print("配置LoRA模型...")

        # Only needed for genuinely quantized loads; skipped otherwise so
        # the previous (non-quantized) behavior is unchanged.
        if self._loaded_in_4bit:
            model = prepare_model_for_kbit_training(model)

        lora_config = self.get_lora_config()
        model = get_peft_model(model, lora_config)

        # Report the trainable-parameter footprint of the adapters.
        trainable_params = 0
        all_param = 0
        for _, param in model.named_parameters():
            all_param += param.numel()
            if param.requires_grad:
                trainable_params += param.numel()

        print(f"可训练参数: {trainable_params:,}")
        print(f"总参数: {all_param:,}")
        print(f"可训练参数占比: {100 * trainable_params / all_param:.2f}%")

        return model

    def get_training_arguments(self, output_dir="./qwen3-university-lora"):
        """Return ``TrainingArguments`` for LoRA fine-tuning.

        Bug fix: ``fp16=True`` raises a ValueError when no CUDA device is
        present (e.g. MPS/CPU hosts), so fp16 is now enabled only on CUDA.

        Args:
            output_dir: checkpoint/output directory.
        """
        return TrainingArguments(
            output_dir=output_dir,
            num_train_epochs=3,
            per_device_train_batch_size=1,
            gradient_accumulation_steps=8,  # effective batch size 8
            learning_rate=2e-4,
            weight_decay=0.01,
            fp16=torch.cuda.is_available(),
            logging_steps=50,
            save_steps=500,
            save_total_limit=3,
            warmup_steps=100,
            evaluation_strategy="steps",
            eval_steps=500,
            load_best_model_at_end=True,
            metric_for_best_model="loss",
            greater_is_better=False,
            dataloader_num_workers=2,
            remove_unused_columns=False,
            report_to="wandb",  # log training runs to Weights & Biases
            run_name="qwen3-foshan-university",
        )

def test_model_loading():
    """Smoke-test: load model + tokenizer, attach LoRA, run one generation.

    Falls back to a CPU-only, non-quantized configuration if the first
    attempt fails for any reason (deliberate best-effort behavior).

    Returns:
        tuple: ``(model, tokenizer)``.
    """
    print("测试Qwen3模型加载...")

    config = ModelConfig(use_4bit=True, use_lora=True)

    try:
        # Load model/tokenizer and attach LoRA adapters.
        model, tokenizer = config.load_model_and_tokenizer()
        model = config.setup_lora_model(model)

        # Run a single short generation as a sanity check.
        test_prompt = "佛山大学是一所什么样的大学？"
        inputs = tokenizer(test_prompt, return_tensors="pt")

        # Move inputs onto the same device as the model weights.
        if config.device != "cpu":
            inputs = {k: v.to(model.device) for k, v in inputs.items()}

        print(f"\n测试输入: {test_prompt}")

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                # max_new_tokens=50 replaces the deprecated
                # max_length=prompt_len + 50 idiom -- same token budget.
                max_new_tokens=50,
                do_sample=True,
                temperature=0.7,
                pad_token_id=tokenizer.eos_token_id,
            )

        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        print(f"模型输出: {response}")

        print("✅ 模型加载和测试成功!")
        return model, tokenizer

    except Exception as e:
        # Broad catch is intentional: report the failure, then retry with
        # the most conservative settings instead of crashing.
        print(f"❌ 模型加载失败: {str(e)}")
        print("尝试使用CPU模式...")

        config_cpu = ModelConfig(use_4bit=False, use_lora=True)
        model, tokenizer = config_cpu.load_model_and_tokenizer()
        model = config_cpu.setup_lora_model(model)

        print("✅ CPU模式加载成功!")
        return model, tokenizer

if __name__ == "__main__":
    # 测试模型加载
    model, tokenizer = test_model_loading()