import torch
import json
import os
import numpy as np
from datasets import load_dataset, Dataset
from transformers import (
    AutoModelForSequenceClassification, 
    AutoTokenizer, 
    TrainingArguments, 
    Trainer,
    EvalPrediction,
    EarlyStoppingCallback
)
from peft import LoraConfig, get_peft_model, TaskType
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, multilabel_confusion_matrix
from torch.nn import BCEWithLogitsLoss
import warnings
warnings.filterwarnings('ignore')

# Enable MPS GPU acceleration (Apple Silicon, e.g. M4 Pro); fall back to CPU
# when the MPS backend is unavailable.
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")

# Model configuration - Chinese BERT base checkpoint from the HuggingFace hub.
MODEL_NAME = "bert-base-chinese"

def load_categories(categories_path):
    """
    Load label category definitions from a JSON file.

    Args:
        categories_path (str): Path to the categories.json file.

    Returns:
        dict: Mapping from label name to its description.
        list: Ordered list of label names (JSON key order).
    """
    with open(categories_path, 'r', encoding='utf-8') as fp:
        category_map = json.load(fp)

    # Label order is fixed by the JSON key order; downstream code relies on
    # this ordering when building multi-hot label vectors.
    return category_map, list(category_map.keys())

def load_local_dataset(data_path, label_list):
    """
    Load a local JSONL dataset and convert it to multi-label format.

    Each line is a JSON object with at least a 'text' field plus optional
    'labels' and 'negated' lists; a label listed in 'negated' is treated as
    absent. Malformed lines are reported and skipped; blank lines (e.g. a
    trailing newline) are ignored silently.

    Args:
        data_path (str): Path to the JSONL data file.
        label_list (list): Ordered list of all label names.

    Returns:
        Dataset: HuggingFace Dataset with 'text' and multi-hot 'labels' columns.

    Raises:
        FileNotFoundError: If data_path does not exist.
    """
    if not os.path.exists(data_path):
        raise FileNotFoundError(f"数据文件不存在: {data_path}")

    # O(1) label -> position lookup instead of list.index() inside the loop.
    label_index = {label: i for i, label in enumerate(label_list)}
    data = []

    with open(data_path, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            line = line.strip()
            if not line:
                # Blank line: not an error, just skip it quietly.
                continue

            try:
                item = json.loads(line)
            except json.JSONDecodeError:
                print(f"警告：第{line_num}行JSON格式错误，跳过")
                continue

            # A record without text cannot be classified; skip it.
            if 'text' not in item:
                print(f"警告：第{line_num}行缺少'text'字段，跳过")
                continue

            labels = item.get('labels', [])
            negated = set(item.get('negated', []))

            # Build the multi-hot vector; negated labels count as negatives.
            label_vector = [0] * len(label_list)
            for label in labels:
                if label in label_index and label not in negated:
                    label_vector[label_index[label]] = 1

            data.append({
                'text': item['text'],
                'labels': label_vector,
            })

    print(f"成功加载 {len(data)} 条数据")
    return Dataset.from_list(data)

def tokenize_function(examples, tokenizer, max_length=512):
    """
    Preprocess a batch of examples: tokenize the text and carry labels along.

    Args:
        examples: Batch dict containing 'text' and 'labels' columns.
        tokenizer: HuggingFace tokenizer callable.
        max_length: Maximum sequence length; longer texts are truncated,
            shorter ones padded to this length.

    Returns:
        dict: Tokenizer output with a 'labels' entry attached.
    """
    encoded = tokenizer(
        examples['text'],
        truncation=True,
        padding='max_length',
        max_length=max_length,
        return_tensors=None,
    )
    # Propagate the multi-hot label vectors alongside the token ids.
    encoded['labels'] = examples['labels']
    return encoded

def compute_metrics(eval_pred: EvalPrediction, label_list):
    """
    Compute multi-label classification metrics.

    Logits are passed through a sigmoid and thresholded at 0.5 to obtain
    binary per-label predictions, which are compared to the gold label matrix.

    Args:
        eval_pred: (predictions, labels) pair produced by the Trainer.
        label_list: List of label names (unused here; kept for interface parity).

    Returns:
        dict: Exact-match accuracy plus macro- and micro-averaged P/R/F1.
    """
    logits, gold = eval_pred

    # Binarize: sigmoid probability above 0.5 means the label is predicted.
    y_pred = (torch.sigmoid(torch.Tensor(logits)) > 0.5).int().numpy()
    y_true = gold.astype(int)

    # Subset accuracy: a sample counts only when every label matches exactly.
    exact_match = accuracy_score(y_true, y_pred)

    macro_p, macro_r, macro_f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='macro', zero_division=0
    )
    micro_p, micro_r, micro_f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='micro', zero_division=0
    )

    return {
        'exact_match': exact_match,
        'f1_macro': macro_f1,
        'precision_macro': macro_p,
        'recall_macro': macro_r,
        'f1_micro': micro_f1,
        'precision_micro': micro_p,
        'recall_micro': micro_r,
    }

class OptimizedMultiLabelLoRATrainer(Trainer):
    """
    Trainer subclass for multi-label classification with LoRA.

    Overrides loss computation to use BCEWithLogitsLoss with optional
    label smoothing on the 0/1 targets.
    """

    def __init__(self, label_smoothing=0.1, **kwargs):
        """
        Args:
            label_smoothing (float): Smoothing factor used to soften the
                hard 0/1 targets and reduce overfitting.
        """
        super().__init__(**kwargs)
        self.label_smoothing = label_smoothing

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        """
        Compute the multi-label BCE loss, applying label smoothing when enabled.
        """
        targets = inputs.pop("labels")
        outputs = model(**inputs)

        # Soften hard targets: 0 -> s/2, 1 -> 1 - s/2.
        smoothing = self.label_smoothing
        if smoothing > 0:
            targets = targets * (1 - smoothing) + smoothing / 2

        loss = BCEWithLogitsLoss()(outputs.logits, targets.float())
        return (loss, outputs) if return_outputs else loss

def setup_optimized_lora_model(model, lora_config):
    """
    Wrap a base model with the given LoRA configuration.

    Args:
        model: Base transformer model.
        lora_config: peft LoraConfig instance.

    Returns:
        The PEFT-wrapped model with LoRA adapters attached.
    """
    peft_model = get_peft_model(model, lora_config)
    # Report how many parameters the adapters leave trainable.
    peft_model.print_trainable_parameters()
    return peft_model

def main():
    """
    Main training entry point: fine-tune BERT for multi-label text
    classification with an optimized LoRA setup on Apple MPS.
    """
    print("=== BERT多标签文本分类优化LoRA微调 ===")

    # Load label definitions.
    categories_path = './data/categories.json'
    categories, label_list = load_categories(categories_path)
    num_labels = len(label_list)

    print(f"标签数量: {num_labels}")
    print(f"标签列表: {label_list}")

    # Load the tokenizer.
    print("正在加载分词器...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

    # Load the three dataset splits.
    print("正在加载数据集...")
    train_dataset = load_local_dataset('./data/train.jsonl', label_list)
    valid_dataset = load_local_dataset('./data/valid.jsonl', label_list)
    test_dataset = load_local_dataset('./data/test.jsonl', label_list)

    # Preprocessing: 256 tokens (down from 512) trades context for speed.
    print("正在进行数据预处理...")
    max_length = 256

    def _tokenize(split):
        # Same tokenization for every split; multiprocess for throughput.
        return split.map(
            lambda x: tokenize_function(x, tokenizer, max_length),
            batched=True,
            remove_columns=['text'],
            num_proc=4,
        )

    train_dataset = _tokenize(train_dataset)
    valid_dataset = _tokenize(valid_dataset)
    test_dataset = _tokenize(test_dataset)

    # Load the base model.
    print("正在加载BERT模型...")
    model = AutoModelForSequenceClassification.from_pretrained(
        MODEL_NAME,
        num_labels=num_labels,
        problem_type="multi_label_classification",
        torch_dtype=torch.float32,
        device_map=None,
        # NOTE(review): trust_remote_code is not needed for bert-base-chinese;
        # consider removing it to avoid executing repository-provided code.
        trust_remote_code=True,
    )

    # Optimized LoRA configuration: lower rank, more target modules.
    lora_config = LoraConfig(
        task_type=TaskType.SEQ_CLS,      # sequence classification task
        inference_mode=False,
        r=8,                             # rank 8 halves adapter size vs r=16
        lora_alpha=16,                   # keep alpha/r ratio at 2
        lora_dropout=0.05,
        target_modules=["query", "value", "key", "dense"],
        bias="none",                     # bias terms stay frozen
        modules_to_save=["classifier"],  # classification head trained fully
    )

    print("正在配置优化LoRA...")
    model = setup_optimized_lora_model(model, lora_config)

    # Move the model to MPS (or CPU fallback) manually.
    model = model.to(device)
    print(f"模型已加载到设备: {device}")

    output_dir = "./models_lora/bert-multilabel-lora-optimized"

    training_args = TrainingArguments(
        output_dir=output_dir,
        num_train_epochs=2,
        per_device_train_batch_size=32,
        per_device_eval_batch_size=64,
        gradient_accumulation_steps=2,   # effective train batch size of 64
        warmup_ratio=0.05,
        learning_rate=5e-4,
        weight_decay=0.01,
        logging_dir='./logs_lora_optimized',
        logging_steps=50,
        eval_strategy="steps",
        eval_steps=200,
        save_strategy="steps",
        save_steps=200,
        save_total_limit=2,
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        greater_is_better=False,
        fp16=False,                      # fp16 is unsupported on MPS
        dataloader_pin_memory=True,
        dataloader_num_workers=2,
        remove_unused_columns=False,
        report_to=[],
        no_cuda=True,
        dataloader_drop_last=True,       # drop the last incomplete batch
        # BUGFIX: this must stay False — with prediction_loss_only=True the
        # Trainer discards logits at eval time, compute_metrics never runs,
        # and the test-set evaluation below reports only eval_loss.
        prediction_loss_only=False,
        skip_memory_metrics=True,
    )

    trainer = OptimizedMultiLabelLoRATrainer(
        label_smoothing=0.05,            # mild smoothing for the BCE targets
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=valid_dataset,
        compute_metrics=lambda x: compute_metrics(x, label_list),
        callbacks=[EarlyStoppingCallback(early_stopping_patience=2)],
    )

    # Sanity checks before training starts.
    print("\n=== 训练前验证 ===")
    print(f"训练集大小: {len(train_dataset)}")
    print(f"验证集大小: {len(valid_dataset)}")
    print(f"测试集大小: {len(test_dataset)}")
    print(f"设备类型: {device}")
    print(f"模型设备: {next(model.parameters()).device}")
    print(f"最大序列长度: {max_length}")
    print(f"有效batch size: {training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps}")

    print("\n=== 开始优化LoRA训练 ===")
    try:
        trainer.train()
        print("优化LoRA训练完成")
    except Exception as e:
        # Surface the failure but re-raise so the process exits non-zero.
        print(f"训练过程中出现错误: {e}")
        raise

    # Final evaluation on the held-out test split.
    print("\n=== 测试集评估 ===")
    test_results = trainer.evaluate(eval_dataset=test_dataset)
    print("测试集结果:")
    for key, value in test_results.items():
        print(f"{key}: {value:.4f}")

    # Persist the adapters, tokenizer and metadata.
    print("\n=== 保存优化LoRA模型 ===")
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)

    # Save the label order so inference can rebuild the label vector.
    with open(os.path.join(output_dir, "label_list.json"), 'w', encoding='utf-8') as f:
        json.dump(label_list, f, ensure_ascii=False, indent=2)

    # Save a human-readable summary of the LoRA setup used for this run.
    lora_config_dict = {
        "task_type": "SEQ_CLS",
        "r": 8,
        "lora_alpha": 16,
        "lora_dropout": 0.05,
        "target_modules": ["query", "value", "key", "dense"],
        "bias": "none",
        "modules_to_save": ["classifier"],
        "base_model": MODEL_NAME,
        "max_length": max_length,
        "optimization_notes": "优化版本：减少rank、增加batch size、减少序列长度、增加目标模块"
    }

    with open(os.path.join(output_dir, "lora_config.json"), 'w', encoding='utf-8') as f:
        json.dump(lora_config_dict, f, ensure_ascii=False, indent=2)

    print("优化LoRA模型保存完成")
    print(f"模型保存路径: {output_dir}")

# Script entry point: run the full LoRA fine-tuning pipeline when executed
# directly (no effect when imported as a module).
if __name__ == "__main__":
    main()