import torch
import json
import os
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel, PeftConfig
from torch.nn import Sigmoid

class BERTLoRAMultiLabelClassifier:
    """Multi-label text classifier backed by a LoRA-adapted BERT model.

    Loads a PEFT/LoRA adapter on top of its base sequence-classification
    checkpoint (falling back to loading ``model_path`` as a plain fine-tuned
    model if the adapter cannot be loaded), then exposes single/batch
    prediction, per-label explanations, and a base-model comparison helper.
    """

    def __init__(self, model_path, threshold=0.5):
        """Initialize the classifier.

        Args:
            model_path (str): Directory containing the LoRA adapter (or a
                plain fine-tuned model) plus ``label_list.json``.
            threshold (float): Per-label sigmoid probability cutoff used to
                binarize predictions.

        Raises:
            FileNotFoundError: If ``label_list.json`` or
                ``./data/categories.json`` is missing.
        """
        # Prefer the Apple-Silicon GPU when available, otherwise CPU.
        self.device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
        self.threshold = threshold

        print(f"正在从 {model_path} 加载LoRA模型...")

        # Load the label list exactly once: its length fixes num_labels for
        # the classification head, and it is reused later to decode outputs.
        # (Previously this file was read twice.)
        label_list_path = f"{model_path}/label_list.json"
        with open(label_list_path, 'r', encoding='utf-8') as f:
            self.label_list = json.load(f)
        num_labels = len(self.label_list)

        try:
            # Resolve the base checkpoint name from the adapter's PEFT config.
            peft_config = PeftConfig.from_pretrained(model_path)

            # Load the base model with the correct head size for our labels.
            base_model = AutoModelForSequenceClassification.from_pretrained(
                peft_config.base_model_name_or_path,
                num_labels=num_labels,
                problem_type="multi_label_classification",
                torch_dtype=torch.float32,
                device_map=None
            )

            # Attach the LoRA adapter weights.
            self.model = PeftModel.from_pretrained(base_model, model_path)
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)

        except Exception as e:
            # Not a LoRA checkpoint (or loading failed): best-effort fallback
            # to treating model_path as an ordinary fine-tuned model.
            print(f"加载LoRA模型失败，尝试加载为普通模型: {e}")
            self.model = AutoModelForSequenceClassification.from_pretrained(
                model_path,
                torch_dtype=torch.float32,
                device_map=None
            )
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)

        self.model = self.model.to(self.device)
        self.model.eval()

        # Human-readable label descriptions used by explain_prediction().
        categories_path = "./data/categories.json"
        with open(categories_path, 'r', encoding='utf-8') as f:
            self.categories = json.load(f)

        # Optional training-time LoRA hyperparameters (informational only).
        try:
            lora_config_path = f"{model_path}/lora_config.json"
            with open(lora_config_path, 'r', encoding='utf-8') as f:
                self.lora_config = json.load(f)
                print(f"LoRA配置: {self.lora_config}")
        except FileNotFoundError:
            self.lora_config = None
            print("未找到LoRA配置文件")

        print(f"模型加载完成，设备: {self.device}")
        print(f"支持的标签: {self.label_list}")

        # PEFT models expose a trainable-parameter summary; plain models don't.
        if hasattr(self.model, 'print_trainable_parameters'):
            print("\n=== LoRA模型参数信息 ===")
            self.model.print_trainable_parameters()

    def _score_text(self, model, tokenizer, text):
        """Tokenize ``text``, run ``model``, and return per-label sigmoid
        probabilities as a 1-D numpy array.

        Shared by predict() and compare_with_base_model() so the inference
        pipeline exists in exactly one place.
        """
        inputs = tokenizer(
            text,
            truncation=True,
            padding='max_length',
            max_length=512,
            return_tensors='pt'
        )
        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        with torch.no_grad():
            logits = model(**inputs).logits

        # torch.sigmoid avoids allocating a fresh nn.Sigmoid module per call.
        return torch.sigmoid(logits).cpu().numpy()[0]

    def predict(self, text, return_probabilities=False):
        """Predict labels for a single text.

        Args:
            text (str): Input text.
            return_probabilities (bool): If True, include the per-label
                probability map in the result.

        Returns:
            dict: ``{'text', 'predicted_labels'}`` and, when requested,
            ``'label_probabilities'`` mapping each label to a float in [0, 1].
        """
        probabilities = self._score_text(self.model, self.tokenizer, text)

        # Binarize: a label is predicted when its probability exceeds the
        # threshold (strictly greater).
        predictions = (probabilities > self.threshold).astype(int)

        result = {
            'text': text,
            'predicted_labels': [],
            'label_probabilities': {}
        }
        for label, prob, pred in zip(self.label_list, probabilities, predictions):
            result['label_probabilities'][label] = float(prob)
            if pred == 1:
                result['predicted_labels'].append(label)

        if return_probabilities:
            return result
        return {
            'text': text,
            'predicted_labels': result['predicted_labels']
        }

    def predict_batch(self, texts, return_probabilities=False):
        """Predict labels for a list of texts (one predict() call each).

        Args:
            texts (list): List of input strings.
            return_probabilities (bool): Forwarded to predict().

        Returns:
            list: One predict() result dict per input text, in order.
        """
        return [self.predict(text, return_probabilities) for text in texts]

    def explain_prediction(self, text):
        """Build a detailed, per-label explanation for ``text``.

        Args:
            text (str): Input text.

        Returns:
            dict: Predicted labels, model info (type/threshold/LoRA config),
            and per-label entries (probability, predicted flag, description)
            ordered by descending probability.
        """
        result = self.predict(text, return_probabilities=True)

        explanation = {
            'text': text,
            'predicted_labels': result['predicted_labels'],
            'label_explanations': {},
            'model_info': {
                'model_type': 'LoRA',
                'threshold': self.threshold,
                'lora_config': self.lora_config
            }
        }

        # Present labels in descending probability order (dicts preserve
        # insertion order).
        sorted_labels = sorted(
            result['label_probabilities'].items(),
            key=lambda item: item[1],
            reverse=True
        )
        for label, prob in sorted_labels:
            explanation['label_explanations'][label] = {
                'probability': prob,
                'predicted': label in result['predicted_labels'],
                'description': self.categories.get(label, "未知标签")
            }

        return explanation

    def compare_with_base_model(self, text, base_model_path):
        """Compare LoRA predictions against a separately fine-tuned model.

        Args:
            text (str): Input text.
            base_model_path (str): Path of the baseline model to load.

        Returns:
            dict: Comparison with per-label probability deltas and label
            additions/removals; falls back to the LoRA-only explanation if
            the base model cannot be loaded or scored.
        """
        lora_result = self.explain_prediction(text)

        try:
            base_model = AutoModelForSequenceClassification.from_pretrained(
                base_model_path,
                torch_dtype=torch.float32,
                device_map=None
            )
            base_tokenizer = AutoTokenizer.from_pretrained(base_model_path)
            base_model = base_model.to(self.device)
            base_model.eval()

            # Score with the same pipeline (and threshold) as the LoRA model.
            base_probabilities = self._score_text(base_model, base_tokenizer, text)
            base_predictions = (base_probabilities > self.threshold).astype(int)
            base_predicted_labels = [
                label for label, pred in zip(self.label_list, base_predictions)
                if pred == 1
            ]

            comparison = {
                'text': text,
                'lora_prediction': lora_result['predicted_labels'],
                'base_prediction': base_predicted_labels,
                'probability_differences': {},
                'label_changes': {
                    'added_by_lora': [],
                    'removed_by_lora': []
                }
            }

            # Per-label probability deltas (LoRA minus base).
            for i, label in enumerate(self.label_list):
                lora_prob = lora_result['label_explanations'][label]['probability']
                base_prob = float(base_probabilities[i])
                comparison['probability_differences'][label] = {
                    'lora_prob': lora_prob,
                    'base_prob': base_prob,
                    'difference': lora_prob - base_prob
                }

            # Set difference shows which labels LoRA flips on/off.
            lora_set = set(lora_result['predicted_labels'])
            base_set = set(base_predicted_labels)
            comparison['label_changes']['added_by_lora'] = list(lora_set - base_set)
            comparison['label_changes']['removed_by_lora'] = list(base_set - lora_set)

            return comparison

        except Exception as e:
            # Best-effort comparison: degrade to the LoRA-only explanation.
            print(f"基础模型对比失败: {e}")
            return lora_result

def main():
    """Demo driver for LoRA inference.

    Runs batch prediction on sample texts, prints detailed explanations,
    optionally compares against a base model if one exists on disk, and
    finishes with an interactive prediction loop.
    """
    model_path = "./models_lora/bert-multilabel-lora-optimized"

    try:
        clf = BERTLoRAMultiLabelClassifier(model_path)

        samples = [
            "绩效奖金缩水了",
            "这阵子加班不多，能接受；工具很好用，从没掉链子",
            "权限申请复杂，卡在审批",
            "职业路径不清晰，晋升机会很少",
            "主管很支持我的工作，经常给予指导",
            "跨部门沟通效率低，信息不透明"
        ]

        print("\n=== LoRA批量预测示例 ===")
        for pred in clf.predict_batch(samples):
            print(f"文本: {pred['text']}")
            print(f"预测标签: {pred['predicted_labels']}")
            print("-" * 50)

        print("\n=== LoRA详细预测解释示例 ===")
        for sample in samples[:3]:  # show only the first three samples
            detail = clf.explain_prediction(sample)
            print(f"\n文本: {detail['text']}")
            print(f"预测标签: {detail['predicted_labels']}")
            print(f"模型信息: {detail['model_info']}")
            print("\n各标签概率:")
            for label, info in detail['label_explanations'].items():
                if info['probability'] > 0.1:  # hide low-probability labels
                    status = "✓" if info['predicted'] else "✗"
                    print(f"  {status} {label}: {info['probability']:.3f} - {info['description']}")
            print("-" * 80)

        # Compare against the base model only when it is present on disk.
        base_model_path = "./models/bert-multilabel"
        if os.path.exists(base_model_path):
            print("\n=== LoRA vs 基础模型对比 ===")
            comparison = clf.compare_with_base_model(samples[0], base_model_path)
            if 'lora_prediction' in comparison:
                print(f"文本: {comparison['text']}")
                print(f"LoRA预测: {comparison['lora_prediction']}")
                print(f"基础模型预测: {comparison['base_prediction']}")
                print(f"LoRA新增标签: {comparison['label_changes']['added_by_lora']}")
                print(f"LoRA移除标签: {comparison['label_changes']['removed_by_lora']}")
                print("\n概率差异最大的标签:")
                ranked = sorted(
                    comparison['probability_differences'].items(),
                    key=lambda item: abs(item[1]['difference']),
                    reverse=True
                )
                for label, diff in ranked[:5]:
                    print(f"  {label}: LoRA={diff['lora_prob']:.3f}, Base={diff['base_prob']:.3f}, Diff={diff['difference']:.3f}")

        # Interactive loop: empty lines are skipped; 'quit' exits.
        print("\n=== 交互式LoRA预测 ===")
        print("输入文本进行预测（输入'quit'退出）:")

        while True:
            line = input("\n请输入文本: ").strip()
            if line.lower() == 'quit':
                break
            if not line:
                continue
            detail = clf.explain_prediction(line)
            print(f"\n预测结果: {detail['predicted_labels']}")
            print("\n详细概率:")
            for label, info in detail['label_explanations'].items():
                if info['probability'] > 0.1:  # hide low-probability labels
                    status = "✓" if info['predicted'] else "✗"
                    print(f"  {status} {label}: {info['probability']:.3f}")

    except FileNotFoundError:
        print(f"错误: 找不到LoRA模型文件 {model_path}")
        print("请先运行 main_lora.py 训练LoRA模型")
    except Exception as e:
        print(f"LoRA推理过程中出现错误: {e}")
        import traceback
        traceback.print_exc()

# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()