"""预训练效果评估（困惑度PPL）"""
"""
预训练模型评估器
用于评估增量预训练模型的性能
"""
import os
import torch
import json
import numpy as np
from typing import List, Dict, Any, Optional
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import datasets
from fin_senti_entity_platform.utils.logger import Logger
from fin_senti_entity_platform.utils.config_loader import ConfigLoader


class PretrainEvaluator:
    """Evaluator for continued/incremental pretraining checkpoints.

    Measures language-modeling quality (corpus perplexity) and probes two
    downstream abilities via prompted generation: financial knowledge Q&A
    and entity-level sentiment classification.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the evaluator.

        Args:
            config: Configuration dict. When None, it is loaded from the
                'pretrain_evaluator' section of the project config file.
        """
        if config is None:
            config = ConfigLoader().get('pretrain_evaluator', {})

        self.config = config
        self.logger = Logger().get_logger(__name__)

        # Tokenization / batching parameters.
        self.max_seq_length = config.get('max_seq_length', 1024)
        self.batch_size = config.get('batch_size', 8)

        # Populated by load_model().
        self.model = None
        self.tokenizer = None

    def load_model(self, model_path: str):
        """Load the tokenizer and causal-LM checkpoint.

        Uses fp16 with automatic device placement when CUDA is available,
        fp32 on CPU otherwise.

        Args:
            model_path: Local path (or hub id) of the pretrained model.

        Raises:
            Exception: Re-raised after logging if loading fails.
        """
        try:
            self.logger.info(f"加载预训练模型: {model_path}")

            self.tokenizer = AutoTokenizer.from_pretrained(model_path)

            # Causal LMs often ship without a pad token; fall back to EOS
            # so that batch padding works.
            if self.tokenizer.pad_token_id is None:
                self.tokenizer.pad_token_id = self.tokenizer.eos_token_id

            use_cuda = torch.cuda.is_available()
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                torch_dtype=torch.float16 if use_cuda else torch.float32,
                device_map="auto" if use_cuda else None,
                use_cache=False
            )

            self.logger.info("成功加载预训练模型")

        except Exception as e:
            self.logger.error(f"加载预训练模型失败: {str(e)}")
            raise

    def calculate_perplexity(self, data_files: List[str]) -> float:
        """Compute corpus perplexity over JSON files with a 'text' field.

        Perplexity is exp of the token-weighted average cross-entropy
        loss, so sequences of different lengths are weighted correctly
        (the previous per-batch averaging over-weighted short batches).

        Args:
            data_files: Paths of JSON data files to evaluate on.

        Returns:
            float: Corpus-level perplexity (plain Python float, so the
                result is JSON-serializable).

        Raises:
            ValueError: If the dataset yields no usable tokens.
            Exception: Re-raised after logging on any other failure.
        """
        try:
            self.logger.info(f"计算模型困惑度，数据文件数量: {len(data_files)}")

            dataset = datasets.load_dataset(
                'json',
                data_files=data_files,
                split='train'
            )

            self.logger.info(f"数据集大小: {len(dataset)}")

            def preprocess_function(examples):
                # return_overflowing_tokens splits long documents into
                # multiple max_seq_length windows instead of truncating
                # away everything past the first window.
                return self.tokenizer(
                    examples['text'],
                    truncation=True,
                    max_length=self.max_seq_length,
                    return_overflowing_tokens=True,
                    return_length=True
                )

            tokenized_dataset = dataset.map(
                preprocess_function,
                batched=True,
                remove_columns=dataset.column_names
            )

            # Drop empty sequences.
            tokenized_dataset = tokenized_dataset.filter(
                lambda x: len(x['input_ids']) > 0
            )

            # Keep only the model inputs; bookkeeping columns added by the
            # tokenizer ('length', 'overflow_to_sample_mapping') would
            # break padding/batching below.
            extra_cols = [c for c in tokenized_dataset.column_names
                          if c not in ('input_ids', 'attention_mask')]
            if extra_cols:
                tokenized_dataset = tokenized_dataset.remove_columns(extra_cols)

            def collate_fn(features):
                # Pad ragged sequences into rectangular tensors; a bare
                # DataLoader cannot stack variable-length lists.
                return self.tokenizer.pad(
                    features, padding=True, return_tensors='pt'
                )

            dataloader = torch.utils.data.DataLoader(
                tokenized_dataset,
                batch_size=self.batch_size,
                collate_fn=collate_fn
            )

            self.model.eval()

            total_loss = 0.0
            total_tokens = 0

            with torch.no_grad():
                for step, batch in enumerate(dataloader):
                    input_ids = batch['input_ids'].to(self.model.device)
                    attention_mask = batch['attention_mask'].to(self.model.device)
                    # Exclude padding positions from the loss.
                    labels = input_ids.masked_fill(attention_mask == 0, -100)

                    outputs = self.model(
                        input_ids=input_ids,
                        attention_mask=attention_mask,
                        labels=labels
                    )

                    # The model returns the mean loss over predicted tokens
                    # (valid tokens minus one per sequence, due to the
                    # causal shift); re-weight by that count so the final
                    # average is token-level, not batch-level.
                    n_predicted = int(attention_mask.sum().item()) - input_ids.size(0)
                    if n_predicted <= 0:
                        continue
                    total_loss += outputs.loss.item() * n_predicted
                    total_tokens += n_predicted

                    if step % 10 == 0:
                        self.logger.info(f"处理批次 {step}/{len(dataloader)}")

            if total_tokens == 0:
                raise ValueError("评估数据集中没有可用的token")

            avg_loss = total_loss / total_tokens
            # Plain float: np.float64 is not JSON-serializable downstream.
            perplexity = float(np.exp(avg_loss))

            self.logger.info(f"困惑度计算完成: {perplexity:.4f}")

            return perplexity

        except Exception as e:
            self.logger.error(f"计算困惑度失败: {str(e)}")
            raise

    def _build_generator(self, max_length: int):
        """Create a text-generation pipeline around the loaded model.

        A model loaded with device_map="auto" is already placed by
        accelerate; passing an explicit device then conflicts, so the
        device argument is only supplied for plainly-loaded models.

        Args:
            max_length: Maximum total length for generation.

        Returns:
            A transformers text-generation pipeline.
        """
        kwargs = {}
        if getattr(self.model, 'hf_device_map', None) is None:
            kwargs['device'] = self.model.device
        return pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
            max_length=max_length,
            **kwargs
        )

    def evaluate_financial_knowledge(self, test_data: List[Dict[str, Any]]) -> Dict[str, float]:
        """Evaluate the model's financial knowledge via prompted Q&A.

        NOTE: generation uses sampling (do_sample=True), so results are
        not deterministic across runs.

        Args:
            test_data: Test items with 'question' and 'answer' keys.

        Returns:
            Dict[str, float]: accuracy, correct_count and total_count.

        Raises:
            Exception: Re-raised after logging on failure.
        """
        try:
            self.logger.info(f"评估模型的金融知识理解能力，测试样本数量: {len(test_data)}")

            generator = self._build_generator(max_length=200)

            correct_count = 0

            for i, item in enumerate(test_data):
                question = item.get('question', '')
                correct_answer = item.get('answer', '')

                # Skip incomplete items.
                if not question or not correct_answer:
                    continue

                prompt = f"Q: {question}\nA: "
                generated = generator(
                    prompt,
                    do_sample=True,
                    temperature=0.7,
                    top_k=50,
                    top_p=0.95
                )

                # The pipeline echoes the prompt; keep everything after the
                # first "A: " marker. partition (unlike split()[1]) does not
                # truncate answers that themselves contain "A: ", and does
                # not raise if the marker is missing.
                full_text = generated[0]['generated_text']
                _, sep, tail = full_text.partition('A: ')
                generated_answer = (tail if sep else full_text).strip()

                # Loose containment match; a stricter scorer may be needed
                # for production-quality evaluation.
                if correct_answer.lower() in generated_answer.lower():
                    correct_count += 1

                if (i + 1) % 10 == 0:
                    self.logger.info(f"评估进度: {i + 1}/{len(test_data)}")

            accuracy = correct_count / len(test_data) if test_data else 0

            metrics = {
                'accuracy': accuracy,
                'correct_count': correct_count,
                'total_count': len(test_data)
            }

            self.logger.info(f"金融知识理解能力评估完成: {metrics}")

            return metrics

        except Exception as e:
            self.logger.error(f"评估金融知识理解能力失败: {str(e)}")
            raise

    def evaluate_entity_sentiment_ability(self, test_data: List[Dict[str, Any]]) -> Dict[str, float]:
        """Evaluate entity-level sentiment analysis via prompting.

        NOTE: generation uses sampling (do_sample=True), so results are
        not deterministic across runs.

        Args:
            test_data: Test items with 'text' and 'entities'; each entity
                carries 'text' and an expected 'sentiment' label.

        Returns:
            Dict[str, float]: accuracy, correct_count and total_count
                (counted per entity, not per document).

        Raises:
            Exception: Re-raised after logging on failure.
        """
        try:
            self.logger.info(f"评估模型的实体级情感分析能力，测试样本数量: {len(test_data)}")

            generator = self._build_generator(max_length=300)

            correct_count = 0
            total_count = 0

            for i, item in enumerate(test_data):
                text = item.get('text', '')
                entities = item.get('entities', [])

                if not text or not entities:
                    continue

                for entity in entities:
                    entity_text = entity.get('text', '')
                    expected_sentiment = entity.get('sentiment', '')

                    if not entity_text or not expected_sentiment:
                        continue

                    prompt = f"文本: {text}\n实体: {entity_text}\n该实体在文本中的情感倾向是（积极/消极/中性）？"
                    generated = generator(
                        prompt,
                        do_sample=True,
                        temperature=0.7,
                        top_k=50,
                        top_p=0.95
                    )

                    # Keep only the model continuation after the final
                    # question mark of the (echoed) prompt.
                    generated_sentiment = generated[0]['generated_text'].split('？')[-1].strip()

                    # Loose containment match, as in the knowledge probe.
                    if expected_sentiment.lower() in generated_sentiment.lower():
                        correct_count += 1

                    total_count += 1

                if (i + 1) % 10 == 0:
                    self.logger.info(f"评估进度: {i + 1}/{len(test_data)}")

            accuracy = correct_count / total_count if total_count else 0

            metrics = {
                'accuracy': accuracy,
                'correct_count': correct_count,
                'total_count': total_count
            }

            self.logger.info(f"实体级情感分析能力评估完成: {metrics}")

            return metrics

        except Exception as e:
            self.logger.error(f"评估实体级情感分析能力失败: {str(e)}")
            raise

    def run_comprehensive_evaluation(self, model_path: str, eval_config: Dict[str, Any]) -> Dict[str, Any]:
        """Run every configured evaluation and optionally persist results.

        Args:
            model_path: Path of the model checkpoint to evaluate.
            eval_config: May contain 'perplexity_data_files',
                'financial_knowledge_data', 'entity_sentiment_data' and
                'output_file'; each present key enables that step.

        Returns:
            Dict[str, Any]: Aggregated evaluation results.

        Raises:
            Exception: Re-raised after logging on failure.
        """
        try:
            self.logger.info(f"运行全面的模型评估，模型路径: {model_path}")

            self.load_model(model_path)

            evaluation_results = {
                'model_path': model_path,
                # Checkpoint mtime stands in for an evaluation timestamp.
                'timestamp': int(os.path.getmtime(model_path))
            }

            if 'perplexity_data_files' in eval_config:
                perplexity = self.calculate_perplexity(eval_config['perplexity_data_files'])
                evaluation_results['perplexity'] = perplexity

            if 'financial_knowledge_data' in eval_config:
                financial_knowledge_metrics = self.evaluate_financial_knowledge(
                    eval_config['financial_knowledge_data']
                )
                evaluation_results['financial_knowledge'] = financial_knowledge_metrics

            if 'entity_sentiment_data' in eval_config:
                entity_sentiment_metrics = self.evaluate_entity_sentiment_ability(
                    eval_config['entity_sentiment_data']
                )
                evaluation_results['entity_sentiment'] = entity_sentiment_metrics

            if 'output_file' in eval_config:
                output_file = eval_config['output_file']
                # Guard: a bare filename has an empty dirname, and
                # os.makedirs('') raises.
                output_dir = os.path.dirname(output_file)
                if output_dir:
                    os.makedirs(output_dir, exist_ok=True)

                with open(output_file, 'w', encoding='utf-8') as f:
                    json.dump(evaluation_results, f, ensure_ascii=False, indent=2)

                self.logger.info(f"评估结果已保存到: {output_file}")

            self.logger.info("全面的模型评估完成")

            return evaluation_results

        except Exception as e:
            self.logger.error(f"全面的模型评估失败: {str(e)}")
            raise