"""
模型预测器模块
负责加载训练好的模型并进行意图预测
"""

import torch
import numpy as np
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from typing import Dict, List, Union, Optional
import logging
import json
import time
from pathlib import Path

logger = logging.getLogger(__name__)


class IntentPredictor:
    """Intent predictor.

    Loads a fine-tuned HuggingFace sequence-classification model together
    with its tokenizer, label mappings and training config, and runs
    single-text or batched intent inference.
    """

    def __init__(self, model_path: str):
        """Initialize the predictor and eagerly load all artifacts.

        Args:
            model_path: Directory containing the saved model/tokenizer and,
                optionally, ``label_mappings.json`` and ``training_config.json``.

        Raises:
            Exception: Propagated if the model or tokenizer cannot be loaded.
        """
        self.model_path = model_path
        self.model = None
        self.tokenizer = None
        # Maps class id (as a string key) -> intent name.
        self.label_mappings: Dict[str, str] = {}
        # Default; overridden by training_config.json when present.
        self.max_length = 128
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Load weights and auxiliary files up front so prediction calls
        # never have to lazy-load.
        self._load_model()
        self._load_mappings()

    def _load_model(self):
        """Load the tokenizer and model, then move the model to the device."""
        try:
            logger.info(f"从 {self.model_path} 加载模型...")

            self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)

            self.model = AutoModelForSequenceClassification.from_pretrained(self.model_path)
            self.model.to(self.device)
            self.model.eval()  # inference mode: disables dropout etc.

            logger.info(f"模型加载成功，设备: {self.device}")

        except Exception as e:
            logger.error(f"加载模型失败: {e}")
            raise

    def _load_mappings(self):
        """Load label mappings and training config (best effort, non-fatal)."""
        try:
            # Label id -> intent name mapping.
            mapping_file = Path(self.model_path) / "label_mappings.json"
            if mapping_file.exists():
                with open(mapping_file, 'r', encoding='utf-8') as f:
                    self.label_mappings = json.load(f)

                logger.info(f"标签映射加载成功，类别数: {len(self.label_mappings)}")
            else:
                logger.warning("未找到标签映射文件")

            # Training configuration (currently only max_length is used).
            config_file = Path(self.model_path) / "training_config.json"
            if config_file.exists():
                with open(config_file, 'r', encoding='utf-8') as f:
                    config = json.load(f)
                    self.max_length = config.get('max_length', 128)

                logger.info(f"配置加载成功，最大长度: {self.max_length}")

        except Exception as e:
            # Missing or corrupt auxiliary files are non-fatal; defaults apply.
            logger.warning(f"加载映射文件失败: {e}")

    def predict_single(self, text: str, return_confidence: bool = True,
                      return_all_scores: bool = False) -> Dict:
        """Predict the intent of a single text.

        Args:
            text: Input text.
            return_confidence: Include the top-class probability in the result.
            return_all_scores: Include the probability of every class.

        Returns:
            Dict: Result with ``text``, ``intent`` and ``predicted_class_id``
            (plus optional ``confidence`` / ``all_scores``), or a dict with an
            ``error`` key on empty input or inference failure.

        Raises:
            ValueError: If the model or tokenizer has not been loaded.
        """
        if not self.model or not self.tokenizer:
            raise ValueError("模型尚未加载")

        # Normalize the input and reject blank strings early.
        text = str(text).strip()
        if not text:
            return {'error': '输入文本为空'}

        try:
            # Tokenize to fixed-length tensors matching the training setup.
            inputs = self.tokenizer(
                text,
                truncation=True,
                padding='max_length',
                max_length=self.max_length,
                return_tensors='pt'
            )

            # Move all input tensors to the model's device.
            inputs = {key: value.to(self.device) for key, value in inputs.items()}

            # Forward pass without gradient tracking.
            with torch.no_grad():
                outputs = self.model(**inputs)
                logits = outputs.logits
                probabilities = torch.nn.functional.softmax(logits, dim=-1)
                predicted_class_id = torch.argmax(probabilities, dim=-1).item()
                confidence = probabilities[0][predicted_class_id].item()

            # Map the class id back to a human-readable intent name.
            intent = self.label_mappings.get(str(predicted_class_id), f"unknown_class_{predicted_class_id}")

            result = {
                'text': text,
                'intent': intent,
                'predicted_class_id': predicted_class_id
            }

            if return_confidence:
                result['confidence'] = confidence

            if return_all_scores:
                all_scores = {}
                for i, prob in enumerate(probabilities[0]):
                    class_intent = self.label_mappings.get(str(i), f"class_{i}")
                    all_scores[class_intent] = prob.item()
                result['all_scores'] = all_scores

            return result

        except Exception as e:
            logger.error(f"预测失败: {e}")
            return {'error': str(e)}

    def predict_batch(self, texts: List[str], batch_size: int = 32) -> List[Dict]:
        """Predict intents for a list of texts in mini-batches.

        Args:
            texts: Input texts.
            batch_size: Number of texts per forward pass.

        Returns:
            List[Dict]: One prediction dict per input text, in input order.
        """
        if not texts:
            return []

        results = []

        # Process in fixed-size chunks to bound memory usage.
        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]
            batch_results = self._predict_batch_internal(batch_texts)
            results.extend(batch_results)

        return results

    def _predict_batch_internal(self, texts: List[str]) -> List[Dict]:
        """Run one batched forward pass over *texts*.

        Returns per-text ``{'error': ...}`` dicts if inference fails.

        Raises:
            ValueError: If the model or tokenizer has not been loaded
                (same contract as ``predict_single``).
        """
        # Consistency fix: mirror predict_single's explicit loaded check
        # instead of failing with an opaque AttributeError inside the try.
        if not self.model or not self.tokenizer:
            raise ValueError("模型尚未加载")

        try:
            # Batch tokenization to fixed-length tensors.
            inputs = self.tokenizer(
                texts,
                truncation=True,
                padding='max_length',
                max_length=self.max_length,
                return_tensors='pt'
            )

            inputs = {key: value.to(self.device) for key, value in inputs.items()}

            with torch.no_grad():
                outputs = self.model(**inputs)
                logits = outputs.logits
                probabilities = torch.nn.functional.softmax(logits, dim=-1)
                predicted_classes = torch.argmax(probabilities, dim=-1)
                confidences = torch.max(probabilities, dim=-1)[0]

            # Assemble one result dict per input text.
            results = []

            for i, text in enumerate(texts):
                predicted_class_id = predicted_classes[i].item()
                confidence = confidences[i].item()
                intent = self.label_mappings.get(str(predicted_class_id), f"unknown_class_{predicted_class_id}")

                results.append({
                    'text': text,
                    'intent': intent,
                    'confidence': confidence,
                    'predicted_class_id': predicted_class_id
                })

            return results

        except Exception as e:
            logger.error(f"批量预测失败: {e}")
            return [{'error': str(e)} for _ in texts]

    def get_intent_probabilities(self, text: str) -> Dict[str, float]:
        """Return the full intent -> probability distribution for *text*.

        Args:
            text: Input text.

        Returns:
            Dict[str, float]: Intent name to probability; empty dict on error.
        """
        result = self.predict_single(text, return_all_scores=True)
        return result.get('all_scores', {})

    def predict_with_threshold(self, text: str, threshold: float = 0.5) -> Dict:
        """Predict an intent and mark it uncertain below a confidence threshold.

        Args:
            text: Input text.
            threshold: Minimum confidence required to keep the prediction.

        Returns:
            Dict: Prediction result with ``is_certain`` and ``threshold``
            added; ``intent`` is replaced by ``'uncertain'`` below threshold.
        """
        result = self.predict_single(text, return_confidence=True)

        if 'error' in result:
            return result

        confidence = result.get('confidence', 0.0)

        if confidence < threshold:
            # Low-confidence predictions are downgraded to a sentinel intent.
            result['intent'] = 'uncertain'
            result['is_certain'] = False
        else:
            result['is_certain'] = True

        result['threshold'] = threshold
        return result

    def get_model_info(self) -> Dict:
        """Return metadata about the loaded model.

        Returns:
            Dict: Path, device, max length, class count and supported intents;
            ``model_size`` / ``vocab_size`` are added only when a model is loaded.
        """
        info = {
            'model_path': self.model_path,
            'device': str(self.device),
            'max_length': self.max_length,
            'num_classes': len(self.label_mappings),
            'supported_intents': list(self.label_mappings.values())
        }

        if self.model:
            info['model_size'] = sum(p.numel() for p in self.model.parameters())
            info['vocab_size'] = self.tokenizer.vocab_size if self.tokenizer else 0

        return info

    def benchmark_performance(self, test_texts: List[str], num_runs: int = 3) -> Dict:
        """Benchmark single vs. batch prediction throughput.

        Args:
            test_texts: Non-empty list of texts to benchmark with.
            num_runs: Number of timed repetitions per mode.

        Returns:
            Dict: Timing and throughput statistics for both modes.

        Raises:
            ValueError: If ``test_texts`` is empty — the statistics below
                divide by ``len(test_texts)`` and would otherwise raise
                ``ZeroDivisionError``.
        """
        if not test_texts:
            raise ValueError("test_texts must not be empty")

        logger.info(f"开始性能基准测试，文本数量: {len(test_texts)}, 运行次数: {num_runs}")

        single_times = []
        batch_times = []

        # Time one-at-a-time prediction.
        for _ in range(num_runs):
            start_time = time.time()
            for text in test_texts:
                self.predict_single(text, return_confidence=False)
            single_time = time.time() - start_time
            single_times.append(single_time)

        # Time batched prediction over the same texts.
        for _ in range(num_runs):
            start_time = time.time()
            self.predict_batch(test_texts)
            batch_time = time.time() - start_time
            batch_times.append(batch_time)

        stats = {
            'num_texts': len(test_texts),
            'num_runs': num_runs,
            'single_prediction': {
                'avg_total_time': np.mean(single_times),
                'avg_time_per_text': np.mean(single_times) / len(test_texts),
                'throughput_per_second': len(test_texts) / np.mean(single_times)
            },
            'batch_prediction': {
                'avg_total_time': np.mean(batch_times),
                'avg_time_per_text': np.mean(batch_times) / len(test_texts),
                'throughput_per_second': len(test_texts) / np.mean(batch_times)
            }
        }

        logger.info(f"性能测试完成: 单个预测 {stats['single_prediction']['throughput_per_second']:.1f} texts/sec, "
                   f"批量预测 {stats['batch_prediction']['throughput_per_second']:.1f} texts/sec")

        return stats


def load_predictor(model_path: str) -> IntentPredictor:
    """Create an :class:`IntentPredictor` for a saved model.

    Args:
        model_path: Directory containing the trained model artifacts.

    Returns:
        IntentPredictor: A fully initialized, ready-to-use predictor.
    """
    return IntentPredictor(model_path)


class PredictionPipeline:
    """High-level pipeline wrapping an :class:`IntentPredictor`.

    Adds a configurable confidence threshold and convenience analyses on
    top of the raw predictor.
    """

    def __init__(self, model_path: str, confidence_threshold: float = 0.5):
        """Build the pipeline.

        Args:
            model_path: Directory of the trained model.
            confidence_threshold: Minimum confidence for a certain prediction.
        """
        self.predictor = load_predictor(model_path)
        self.confidence_threshold = confidence_threshold

    def process_conversation(self, conversation: List[str]) -> List[Dict]:
        """Classify every utterance of a conversation, in order.

        Args:
            conversation: Ordered list of utterance texts.

        Returns:
            List[Dict]: Per-utterance prediction dicts, each tagged with a
            zero-based ``turn_id``.
        """
        annotated = []
        for turn_id, utterance in enumerate(conversation):
            prediction = self.predictor.predict_with_threshold(
                utterance, self.confidence_threshold
            )
            prediction['turn_id'] = turn_id
            annotated.append(prediction)
        return annotated

    def analyze_intent_distribution(self, texts: List[str]) -> Dict:
        """Summarize the intent distribution over a collection of texts.

        Args:
            texts: Texts to classify and aggregate.

        Returns:
            Dict: Intent counts and percentages, total successfully
            classified texts, and confidence statistics.
        """
        predictions = self.predictor.predict_batch(texts)

        # Keep only the predictions that did not fail.
        successes = [p for p in predictions if 'error' not in p]

        counts: Dict[str, int] = {}
        for p in successes:
            counts[p['intent']] = counts.get(p['intent'], 0) + 1
        scores = [p.get('confidence', 0) for p in successes]
        total = len(successes)

        return {
            'intent_counts': counts,
            'intent_percentages': {
                name: cnt / total * 100
                for name, cnt in counts.items()
            },
            'total_texts': total,
            'avg_confidence': np.mean(scores) if scores else 0,
            'confidence_stats': {
                'min': np.min(scores) if scores else 0,
                'max': np.max(scores) if scores else 0,
                'std': np.std(scores) if scores else 0
            }
        }


if __name__ == "__main__":
    # Smoke-test the predictor module end to end.

    # Sample utterances covering a few common intents.
    test_texts = [
        "你好",
        "明天天气怎么样",
        "我要订票",
        "播放音乐",
        "再见",
    ]

    # NOTE: a real trained model directory is required here.
    model_path = "./final-bert-intent-model"  # replace with the actual path

    try:
        predictor = load_predictor(model_path)

        # Single-text predictions.
        for text in test_texts:
            result = predictor.predict_single(text)
            print(f"文本: '{text}' -> 意图: {result.get('intent', 'error')}")

        # Batch prediction over the same texts.
        batch_results = predictor.predict_batch(test_texts)
        print(f"\n批量预测完成，处理了 {len(batch_results)} 个文本")

        # Model metadata.
        model_info = predictor.get_model_info()
        print(f"\n模型信息: {model_info}")

    except Exception as e:
        print(f"测试失败: {e}")
        print("请确保有可用的训练好的模型")

    print("\n预测器模块测试完成!")