# -*- coding: utf-8 -*-
"""
Qwen3-4B LoRA微调训练脚本
基于佛山大学问答数据进行微调
"""

import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, Any
import torch
from datasets import load_from_disk
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from peft import get_peft_model, TaskType
from model_config import ModelConfig
import wandb
from datetime import datetime

class UniversityTrainer:
    """LoRA fine-tuning trainer for the Foshan University QA system.

    Orchestrates the full workflow: loading the quantized base model plus
    LoRA adapters (via ModelConfig), tokenizing the prepared dataset,
    running the Hugging Face Trainer, persisting metrics as JSON, and
    smoke-testing the fine-tuned model on a few sample questions.
    """

    def __init__(
        self,
        model_name: str = "./models/Qwen3-4B-Instruct-2507",
        data_path: str = "./processed_data",
        output_dir: str = "./qwen3-university-lora",
        use_wandb: bool = False
    ):
        """
        Args:
            model_name: Local path (or hub id) of the base model to fine-tune.
            data_path: Directory holding the dataset saved with
                ``datasets.save_to_disk`` (train/validation/test splits).
            output_dir: Destination for checkpoints, logs and result files.
            use_wandb: If True, log the run to Weights & Biases.
        """
        self.model_name = model_name
        self.data_path = Path(data_path)
        self.output_dir = Path(output_dir)
        self.use_wandb = use_wandb

        # parents=True so a nested output path (e.g. "runs/exp1/lora") does
        # not raise FileNotFoundError; exist_ok keeps reruns idempotent.
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Components are populated lazily by load_model_and_tokenizer /
        # load_datasets.
        self.model = None
        self.tokenizer = None
        self.datasets = None

    def initialize_wandb(self):
        """Start a Weights & Biases run (no-op when use_wandb is False)."""
        if not self.use_wandb:
            return

        wandb.init(
            project="foshan-university-qa",
            name=f"qwen3-4b-lora-{datetime.now().strftime('%Y%m%d_%H%M%S')}",
            config={
                "model": "Qwen3-4B-Instruct-2507",
                "method": "LoRA",
                "dataset": "FoShan University QA",
                "task": "Instruction Tuning"
            }
        )

    def load_model_and_tokenizer(self):
        """Load the 4-bit quantized base model and tokenizer, then attach
        LoRA adapters; results are stored on the instance."""
        print(f"加载模型: {self.model_name}")

        # ModelConfig encapsulates quantization + LoRA hyper-parameters.
        config = ModelConfig(use_4bit=True, use_lora=True)

        self.model, self.tokenizer = config.load_model_and_tokenizer()

        # Wrap the base model with LoRA adapters (PEFT).
        self.model = config.setup_lora_model(self.model)

        print("模型和分词器加载完成")

    def load_datasets(self):
        """Load the preprocessed DatasetDict from disk.

        Raises:
            FileNotFoundError: If the dataset directory does not exist.
        """
        if not self.data_path.exists():
            raise FileNotFoundError(f"数据集不存在: {self.data_path}")

        print(f"加载数据集: {self.data_path}")
        self.datasets = load_from_disk(str(self.data_path))

        print(f"训练集大小: {len(self.datasets['train'])}")
        print(f"验证集大小: {len(self.datasets['validation'])}")
        print(f"测试集大小: {len(self.datasets['test'])}")

    def tokenize_function(self, examples, max_length: int = 512):
        """Tokenize a batch of examples for causal-LM training.

        Args:
            examples: Batch dict with a "text" column.
            max_length: Truncation length in tokens (default 512).

        Returns:
            Tokenizer output (input_ids / attention_mask). Labels are
            intentionally NOT set here: DataCollatorForLanguageModeling
            (mlm=False) derives them per batch and masks padding with -100.
            The previous version copied input_ids into "labels", which the
            collator does not pad — that only worked because the batch size
            was 1 and would break for padded batches.
        """
        return self.tokenizer(
            examples["text"],
            truncation=True,
            padding=False,  # dynamic padding is done by the data collator
            max_length=max_length,
            return_overflowing_tokens=False,
        )

    def prepare_datasets(self):
        """Tokenize every split, dropping the raw text columns.

        Returns:
            The tokenized DatasetDict.
        """
        print("准备训练数据...")

        tokenized_datasets = self.datasets.map(
            self.tokenize_function,
            batched=True,
            remove_columns=self.datasets["train"].column_names,
            desc="分词处理"
        )

        return tokenized_datasets

    def get_training_arguments(self) -> "TrainingArguments":
        """Build the TrainingArguments for the LoRA fine-tuning run.

        Effective batch size is 1 x 8 (per-device x gradient accumulation);
        best checkpoint is selected by lowest eval loss every 500 steps.
        """
        return TrainingArguments(
            output_dir=str(self.output_dir),
            num_train_epochs=3,
            per_device_train_batch_size=1,
            per_device_eval_batch_size=1,
            gradient_accumulation_steps=8,
            learning_rate=2e-4,
            weight_decay=0.01,
            fp16=False,  # MPS does not support fp16 mixed precision
            logging_steps=50,
            save_steps=500,
            save_total_limit=3,
            eval_strategy="steps",
            eval_steps=500,
            warmup_steps=100,
            load_best_model_at_end=True,
            metric_for_best_model="loss",
            greater_is_better=False,
            dataloader_num_workers=2,
            remove_unused_columns=False,
            report_to="wandb" if self.use_wandb else "none",
            run_name="qwen3-foshan-university",
            logging_dir=str(self.output_dir / "logs"),
            optim="adamw_torch",
            lr_scheduler_type="cosine",
            seed=42,
            data_seed=42,
        )

    def train(self):
        """Run LoRA fine-tuning end-to-end.

        Loads model and data, trains, saves the adapter weights, evaluates
        on the test split, and writes metrics as JSON into output_dir.

        Returns:
            Tuple of (train_result, eval_results).
        """
        print("开始LoRA微调训练...")

        self.initialize_wandb()

        self.load_model_and_tokenizer()
        self.load_datasets()

        tokenized_datasets = self.prepare_datasets()

        # Causal-LM collator: pads dynamically and builds labels with
        # padding masked to -100; mlm=False disables masked-LM objectives.
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False,
        )

        training_args = self.get_training_arguments()

        # NOTE(review): ``tokenizer=`` is deprecated in newer transformers
        # releases in favor of ``processing_class=``; kept as-is for
        # compatibility with the versions this script targets.
        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=tokenized_datasets["train"],
            eval_dataset=tokenized_datasets["validation"],
            tokenizer=self.tokenizer,
            data_collator=data_collator,
        )

        print("🚀 开始训练...")
        train_result = trainer.train()

        print("保存模型...")
        trainer.save_model()
        trainer.save_state()

        # Persist train metrics for later inspection.
        with open(self.output_dir / "train_results.json", "w", encoding="utf-8") as f:
            json.dump(train_result.metrics, f, ensure_ascii=False, indent=2)

        print("评估模型...")
        eval_results = trainer.evaluate(eval_dataset=tokenized_datasets["test"])

        with open(self.output_dir / "eval_results.json", "w", encoding="utf-8") as f:
            json.dump(eval_results, f, ensure_ascii=False, indent=2)

        print(f"✅ 训练完成！模型保存到: {self.output_dir}")
        print(f"训练损失: {train_result.metrics.get('train_loss', 'N/A')}")
        print(f"验证损失: {eval_results.get('eval_loss', 'N/A')}")

        if self.use_wandb:
            wandb.finish()

        return train_result, eval_results

    def test_model(self, test_prompts: "list[str] | None" = None):
        """Generate answers for a few sample questions as a smoke test.

        Args:
            test_prompts: Questions to ask; defaults to four built-in ones.
        """
        if test_prompts is None:
            test_prompts = [
                "佛山大学的图书馆开放时间是什么？",
                "如何申请国家基金？",
                "学校有哪些学院？",
                "电子信息工程学院的联系方式是什么？"
            ]

        print("测试微调后的模型...")

        # Load the model lazily if train() was not called first.
        if self.model is None:
            self.load_model_and_tokenizer()

        self.model.eval()

        for prompt in test_prompts:
            print(f"\n问题: {prompt}")

            # Build a ChatML-style prompt matching the Qwen chat format.
            system_prompt = "你是佛山大学的智能问答助手。请根据提供的信息准确、友好地回答用户关于佛山大学的问题。"
            full_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"

            inputs = self.tokenizer(full_prompt, return_tensors="pt")
            if hasattr(self.model, 'device'):
                inputs = {k: v.to(self.model.device) for k, v in inputs.items()}

            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=256,
                    do_sample=True,
                    temperature=0.7,
                    top_p=0.9,
                    pad_token_id=self.tokenizer.eos_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                )

            # Decode only the newly generated tokens. The previous code
            # decoded the whole sequence with skip_special_tokens=True and
            # then split on "<|im_start|>assistant\n" — but
            # skip_special_tokens strips the chat-marker tokens, so the
            # split never matched and the prompt text leaked into the
            # printed answer.
            prompt_length = inputs["input_ids"].shape[1]
            answer = self.tokenizer.decode(
                outputs[0][prompt_length:], skip_special_tokens=True
            )

            print(f"回答: {answer}")

def main():
    """Command-line entry point: parse arguments, build the trainer, and
    dispatch to training or model testing."""
    parser = argparse.ArgumentParser(description="佛山大学问答系统微调训练")

    # Declarative option table keeps the CLI surface easy to scan.
    option_specs = [
        ("--model", dict(default="./models/Qwen3-4B-Instruct-2507", help="模型路径")),
        ("--data", dict(default="./processed_data", help="数据集路径")),
        ("--output", dict(default="./qwen3-university-lora", help="输出目录")),
        ("--wandb", dict(action="store_true", help="使用wandb记录训练")),
        ("--train", dict(action="store_true", help="开始训练")),
        ("--test", dict(action="store_true", help="测试模型")),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)

    args = parser.parse_args()

    runner = UniversityTrainer(
        model_name=args.model,
        data_path=args.data,
        output_dir=args.output,
        use_wandb=args.wandb,
    )

    if args.train:
        runner.train()
    elif args.test:
        runner.test_model()
    else:
        # Neither mode requested: explain usage instead of doing work.
        print("请指定 --train 或 --test 参数")
        print("使用 --help 查看完整参数列表")

if __name__ == "__main__":
    main()