# util/evaluation.py - RAGAS评测模块

import os
import pandas as pd
from datetime import datetime
import logging
from typing import List, Dict, Any, Optional

# 尝试导入ragas相关库
try:
    from ragas import evaluate
    from ragas.metrics import (
        answer_relevancy,
        faithfulness,
        context_precision,
        context_recall,
        answer_correctness,
        answer_similarity
    )
    from datasets import Dataset
    from ragas.llms.base import BaseRagasLLM
    from langchain_core.language_models.llms import LLM
    from langchain_core.outputs import LLMResult
    from langchain_core.callbacks.manager import CallbackManagerForLLMRun
    from typing import Any, List, Optional
    RAGAS_AVAILABLE = True
except ImportError as e:
    print(f"RAGAS导入失败: {e}")
    print("请安装ragas: pip install ragas datasets langchain-core")
    RAGAS_AVAILABLE = False

logger = logging.getLogger("ragas_evaluation")

# Fall back to ``object`` when the ragas import at the top of this module
# failed; otherwise the class statement itself raises NameError and defeats
# the RAGAS_AVAILABLE graceful-degradation path.
try:
    _RagasLLMBase = BaseRagasLLM
except NameError:
    _RagasLLMBase = object


class DashScopeRagasLLM(_RagasLLMBase):
    """RAGAS-compatible wrapper around a DashScope LLM."""

    def __init__(self, dashscope_llm):
        # Wrapped DashScope model; stored before the base-class init so it is
        # available to any base-class hooks.
        self.dashscope_llm = dashscope_llm
        super().__init__()

    def generate_text(
        self,
        prompt: str,
        n: int = 1,
        temperature: float = 1e-8,
        stop: Optional[List[str]] = None,
        callbacks=None,
        **kwargs,
    ) -> "LLMResult":
        """Generate text - the synchronous interface RAGAS requires.

        NOTE(review): ``n``, ``temperature``, ``stop`` and ``callbacks`` are
        accepted for interface compatibility but are not forwarded to
        DashScope's ``complete`` call.

        Raises:
            Exception: re-raises any failure from the underlying LLM after
                logging it.
        """
        try:
            # Ask the wrapped DashScope LLM for a completion.
            response = self.dashscope_llm.complete(prompt)
            text = response.text

            # Imported locally so the module stays importable when
            # langchain-core is missing.
            from langchain_core.outputs import Generation, LLMResult
            generations = [[Generation(text=text)]]

            return LLMResult(generations=generations)

        except Exception as e:
            logger.error(f"DashScope LLM生成文本失败: {e}")
            raise

    async def agenerate_text(
        self,
        prompt: str,
        n: int = 1,
        temperature: float = 1e-8,
        stop: Optional[List[str]] = None,
        callbacks=None,
        **kwargs,
    ) -> "LLMResult":
        """Async variant; delegates to the synchronous implementation."""
        return self.generate_text(prompt, n, temperature, stop, callbacks, **kwargs)

    def get_model_name(self) -> str:
        """Return the wrapped model's name, or a generic fallback."""
        return getattr(self.dashscope_llm, 'model_name', 'dashscope-llm')

class DashScopeEmbeddingsWrapper:
    """Adapter giving a DashScope embedding model the interface RAGAS expects."""

    def __init__(self, dashscope_embeddings):
        # Underlying DashScope embedding model (LlamaIndex-style API).
        self.dashscope_embeddings = dashscope_embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string; re-raises failures after logging."""
        try:
            return self.dashscope_embeddings.get_query_embedding(text)
        except Exception as exc:
            logger.error(f"DashScope嵌入查询失败: {exc}")
            raise

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of document texts; re-raises failures after logging."""
        try:
            return self.dashscope_embeddings.get_text_embedding_batch(texts)
        except Exception as exc:
            logger.error(f"DashScope嵌入文档失败: {exc}")
            raise

class RAGEvaluator:
    """Collects RAG query results and evaluates them with RAGAS metrics.

    Designed to degrade gracefully: it can be constructed even when the
    ragas/langchain imports at module top failed (``RAGAS_AVAILABLE`` False),
    in which case evaluation calls return error dicts instead of crashing.
    """

    def __init__(self, llm=None, embeddings=None, output_dir="./evaluation_results"):
        """
        Initialize the RAG evaluator.

        Args:
            llm: LLM instance configured by the main program (DashScope-style).
            embeddings: embedding model configured by the main program.
            output_dir: directory where evaluation results are written.
        """
        self.llm = llm
        self.embeddings = embeddings
        self.output_dir = output_dir
        # Accumulated per-query records; see add_query_result() for the schema.
        self.evaluation_data: List[Dict[str, Any]] = []
        os.makedirs(output_dir, exist_ok=True)

        # The metric objects only exist when the ragas import succeeded.
        # Guarding here fixes a NameError that made this class unconstructible
        # in degraded mode (setup_ragas_environment relies on constructing it).
        if RAGAS_AVAILABLE:
            # Simple metric combination to avoid heavy dependencies; the
            # ground-truth metrics (context_recall, answer_correctness) are
            # intentionally disabled.
            self.metrics = [
                answer_relevancy,
                faithfulness,
                context_precision,
                answer_similarity
            ]
        else:
            self.metrics = []

        logger.info(f"RAG评测器初始化完成，结果将保存到: {output_dir}")
        if llm is not None:
            logger.info("已接收主程序LLM配置")
        if embeddings is not None:
            logger.info("已接收主程序Embedding配置")

    def add_query_result(self, query: str, response: str, contexts: List[str],
                        ground_truth: Optional[str] = None, **kwargs):
        """
        Record one query/answer pair for later evaluation.

        Args:
            query: the user query.
            response: the model's answer (coerced to str).
            contexts: retrieved context passages.
            ground_truth: reference answer, if any (stored as "" when absent).
            **kwargs: extra metadata merged into the record.
        """
        evaluation_record = {
            "question": query,
            "answer": str(response),  # ensure it is a string
            "contexts": contexts,
            "ground_truth": ground_truth or "",
            "timestamp": datetime.now().isoformat(),
            **kwargs
        }

        self.evaluation_data.append(evaluation_record)
        logger.info(f"已添加查询结果到评测数据集: '{query}'")

    def prepare_dataset(self) -> Optional["Dataset"]:
        """Build a HuggingFace Dataset from the collected records.

        Returns:
            the Dataset, or None when there is no data, RAGAS is unavailable,
            or a required column is missing.
        """
        # NOTE: the return annotation is a string on purpose — ``Dataset`` is
        # undefined when the ragas/datasets import failed.
        if not self.evaluation_data:
            logger.warning("没有可用的评测数据")
            return None

        if not RAGAS_AVAILABLE:
            logger.error("RAGAS库未安装，无法进行评测")
            return None

        df = pd.DataFrame(self.evaluation_data)

        # RAGAS needs at least these three columns.
        required_columns = ["question", "answer", "contexts"]
        for col in required_columns:
            if col not in df.columns:
                logger.error(f"缺少必要列: {col}")
                return None

        dataset = Dataset.from_pandas(df)
        return dataset

    def create_ragas_llm(self):
        """Wrap the configured LLM for RAGAS; returns None on failure."""
        if self.llm is None:
            return None

        try:
            ragas_llm = DashScopeRagasLLM(self.llm)
            logger.info("已创建RAGAS兼容的DashScope LLM包装器")
            return ragas_llm
        except Exception as e:
            logger.error(f"创建RAGAS LLM包装器失败: {e}")
            return None

    def create_ragas_embeddings(self):
        """Wrap the configured embeddings for RAGAS; returns None on failure."""
        if self.embeddings is None:
            return None

        try:
            ragas_embeddings = DashScopeEmbeddingsWrapper(self.embeddings)
            logger.info("已创建RAGAS兼容的DashScope Embeddings包装器")
            return ragas_embeddings
        except Exception as e:
            logger.error(f"创建RAGAS Embeddings包装器失败: {e}")
            return None

    def evaluate_rag_system(self, dataset_name: str = "default") -> Dict[str, Any]:
        """
        Run the RAGAS evaluation over all collected records.

        Args:
            dataset_name: label used in the saved result filenames.

        Returns:
            the RAGAS result object on success, or a dict with an "error" key.
        """
        if not RAGAS_AVAILABLE:
            return {"error": "RAGAS库未安装"}

        dataset = self.prepare_dataset()
        if dataset is None:
            return {"error": "无法准备评测数据集"}

        try:
            logger.info("开始RAGAS评测...")

            # Build RAGAS-compatible LLM/embedding wrappers (either may be None).
            ragas_llm = self.create_ragas_llm()
            ragas_embeddings = self.create_ragas_embeddings()

            # Only pass the wrappers that were successfully created.
            evaluation_kwargs = {}
            if ragas_llm is not None:
                evaluation_kwargs["llm"] = ragas_llm
            if ragas_embeddings is not None:
                evaluation_kwargs["embeddings"] = ragas_embeddings

            logger.info(f"使用配置进行评测: {list(evaluation_kwargs.keys())}")
            logger.info(f"使用的指标: {[metric.__class__.__name__ for metric in self.metrics]}")

            if ragas_llm is None:
                # Without an LLM, fall back to metrics that don't need one.
                logger.warning("无法创建兼容的LLM，尝试使用不需要LLM的指标...")
                simple_metrics = [answer_similarity]  # typically LLM-free
                result = evaluate(
                    dataset=dataset,
                    metrics=simple_metrics,
                    **evaluation_kwargs
                )
            else:
                result = evaluate(
                    dataset=dataset,
                    metrics=self.metrics,
                    **evaluation_kwargs
                )

            self.save_evaluation_results(result, dataset_name)

            logger.info("RAGAS评测完成")
            return result

        except Exception as e:
            logger.error(f"RAGAS评测失败: {e}")
            import traceback
            logger.error(traceback.format_exc())

            # Last resort: a stripped-down evaluation with minimal dependencies.
            try:
                logger.info("尝试简化版评测...")
                simplified_result = self.simplified_evaluation(dataset)
                return simplified_result
            except Exception as e2:
                logger.error(f"简化版评测也失败: {e2}")
                return {"error": f"评测失败: {str(e)}"}

    def simplified_evaluation(self, dataset):
        """Minimal evaluation path that avoids the custom LLM wrappers."""
        try:
            from ragas.metrics import answer_similarity

            # No LLM/embeddings passed: let ragas use its defaults.
            result = evaluate(
                dataset=dataset,
                metrics=[answer_similarity],
            )

            logger.info("简化版评测完成")
            return result
        except Exception as e:
            logger.error(f"简化版评测失败: {e}")
            return {"error": "所有评测方法都失败"}

    def save_evaluation_results(self, result: Dict[str, Any], dataset_name: str):
        """Persist a result object as JSON (full) and CSV (per-metric scores)."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"rag_evaluation_{dataset_name}_{timestamp}.json"
        filepath = os.path.join(self.output_dir, filename)

        try:
            # ``result`` may be a ragas Result, a namedtuple, or a plain dict.
            if hasattr(result, '__dict__'):
                result_dict = result.__dict__
            elif hasattr(result, '_asdict'):
                result_dict = result._asdict()
            else:
                result_dict = dict(result) if isinstance(result, dict) else {"result": str(result)}

            # Coerce non-JSON-serializable values to strings.
            serializable_result = {}
            for key, value in result_dict.items():
                try:
                    if hasattr(value, '__dict__'):
                        serializable_result[key] = str(value)
                    else:
                        serializable_result[key] = value
                except Exception:  # narrowed from a bare except
                    serializable_result[key] = str(value)

            import json
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(serializable_result, f, indent=2, ensure_ascii=False)

            # Also write a flat CSV for quick inspection.
            csv_filename = f"rag_evaluation_{dataset_name}_{timestamp}.csv"
            csv_filepath = os.path.join(self.output_dir, csv_filename)

            metrics_data = []
            for metric_name, metric_value in serializable_result.items():
                if metric_name not in ['error', 'timestamp']:
                    try:
                        score = float(metric_value) if isinstance(metric_value, (int, float)) else metric_value
                        metrics_data.append({
                            "metric": metric_name,
                            "score": score,
                            "dataset": dataset_name,
                            "timestamp": timestamp
                        })
                    except (ValueError, TypeError):
                        metrics_data.append({
                            "metric": metric_name,
                            "score": str(metric_value),
                            "dataset": dataset_name,
                            "timestamp": timestamp
                        })

            if metrics_data:
                metrics_df = pd.DataFrame(metrics_data)
                metrics_df.to_csv(csv_filepath, index=False, encoding='utf-8')
                logger.info(f"指标详情已保存: {csv_filepath}")

            logger.info(f"评测结果已保存: {filepath}")

        except Exception as e:
            logger.error(f"保存评测结果时出错: {e}")
            # Best-effort plain-text error report so the failure is diagnosable.
            error_filepath = os.path.join(self.output_dir, f"error_report_{timestamp}.txt")
            with open(error_filepath, 'w', encoding='utf-8') as f:
                f.write(f"评测结果保存错误: {e}\n")
                f.write(f"结果类型: {type(result)}\n")
                f.write(f"结果内容: {str(result)[:500]}\n")

    def print_evaluation_summary(self, result: Dict[str, Any]):
        """Print a human-readable summary of a result dict to stdout."""
        if "error" in result:
            print(f"❌ 评测错误: {result['error']}")
            return

        print("\n" + "="*80)
        print("📊 RAGAS 评测结果摘要")
        print("="*80)

        # Numeric scores get 4 decimal places; everything else prints as-is.
        scores_found = False
        for metric_name, metric_score in result.items():
            if metric_name not in ["error", "timestamp"] and isinstance(metric_score, (int, float)):
                print(f"✅ {metric_name}: {metric_score:.4f}")
                scores_found = True
            elif metric_name not in ["error", "timestamp"]:
                print(f"✅ {metric_name}: {metric_score}")
                scores_found = True

        if not scores_found:
            print("⚠️  未找到可用的评分指标")
            if len(result) > 0:
                print("可用指标:")
                for key in result.keys():
                    if key not in ["error", "timestamp"]:
                        print(f"  - {key}")

        print("="*80)

    def get_evaluation_data_count(self) -> int:
        """Return the number of collected evaluation records."""
        return len(self.evaluation_data)

    def clear_evaluation_data(self):
        """Discard all collected evaluation records."""
        self.evaluation_data.clear()
        logger.info("评测数据已清空")

    def is_ready(self) -> bool:
        """True when RAGAS is importable and at least one record was added."""
        return RAGAS_AVAILABLE and len(self.evaluation_data) > 0

# Module-wide evaluator singleton; populated by the helpers below
# (initialized from the main program).
global_evaluator = None

def initialize_global_evaluator(llm=None, embeddings=None):
    """(Re)create the module-wide RAGEvaluator singleton and return it."""
    global global_evaluator
    evaluator = RAGEvaluator(llm=llm, embeddings=embeddings)
    global_evaluator = evaluator
    return evaluator

def get_global_evaluator():
    """Return the current evaluator singleton (may be None)."""
    return global_evaluator

def setup_ragas_environment(llm=None, embeddings=None):
    """Set up the RAGAS environment; returns True only on full success.

    A (possibly degraded) evaluator singleton is always created so callers
    never have to deal with ``global_evaluator`` being None.
    """
    global global_evaluator

    if not RAGAS_AVAILABLE:
        logger.warning("RAGAS不可用，请安装: pip install ragas datasets langchain-core")
        global_evaluator = RAGEvaluator(llm=llm, embeddings=embeddings)
        return False

    try:
        initialize_global_evaluator(llm=llm, embeddings=embeddings)
    except Exception as exc:
        logger.error(f"RAGAS环境设置失败: {exc}")
        global_evaluator = RAGEvaluator(llm=llm, embeddings=embeddings)
        return False

    logger.info("RAGAS环境设置完成")
    return True

def is_evaluator_ready():
    """True when the singleton exists and reports itself ready."""
    evaluator = get_global_evaluator()
    return evaluator is not None and evaluator.is_ready()