import sys
import os
import json
import logging
from pathlib import Path
from typing import List, Dict, Any, Optional
from datetime import datetime

# Make the project root importable so the local `modular_rag` package
# resolves when this file is executed directly as a script.
_this_file = Path(__file__)
project_root = str(_this_file.parent.parent)
sys.path.append(project_root)

import numpy as np
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from trulens_eval import Feedback, Tru, TruChain, TruLlama
from trulens_eval import OpenAI as TruOpenAI
from trulens_eval.feedback import Groundedness
from trulens_eval.feedback.provider.openai import OpenAI as OpenAIProvider
from modular_rag.search.strategies import VectorSearchStrategy
from modular_rag.prediction.predictor import PredictionModule
from modular_rag.fusion.fusion import FusionModule


class RAGEvaluator:
    """Evaluate a RAG pipeline with TruLens.

    Builds a LangChain retrieval chain (Chroma vector store + ChatOpenAI),
    wraps it with TruLens feedback functions, runs test queries through it,
    aggregates per-metric averages, and persists a JSON report.
    """

    def __init__(self,
                 openai_api_key: str,
                 model_name: str = "gpt-3.5-turbo",
                 embedding_model: str = "BAAI/bge-large-zh-v1.5",
                 db_path: str = "data/vector_store",
                 save_path: str = "results/trulens_evaluation"):
        """
        Args:
            openai_api_key: Key used for both answer generation and the
                TruLens OpenAI feedback provider.
            model_name: Chat model used to answer queries.
            embedding_model: HuggingFace model used for embeddings.
            db_path: Chroma persistence directory.
            save_path: Directory where JSON evaluation reports are written.
        """
        self.openai_api_key = openai_api_key
        self.model_name = model_name
        self.embedding_model = embedding_model
        self.db_path = db_path
        self.save_path = save_path

        # BUG FIX: set up the logger BEFORE component initialization. The
        # original assigned self.logger afterwards, so a failure inside
        # _initialize_components left the instance without a logger.
        self.logger = logging.getLogger(__name__)
        self._setup_logging()

        self._initialize_components()

    def _setup_logging(self) -> None:
        """Configure root logging with a timestamped format (no-op if already configured)."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )

    def _initialize_components(self) -> None:
        """Build the RAG, LangChain, and TruLens components."""
        # Project-local RAG modules. They are constructed here but not used
        # directly by this evaluator; kept for interface compatibility.
        self.search_strategy = VectorSearchStrategy()
        self.prediction_module = PredictionModule()
        self.fusion_module = FusionModule()

        # LangChain stack: embeddings, persistent vector store, chat model.
        self.embeddings = HuggingFaceEmbeddings(
            model_name=self.embedding_model
        )
        self.vectorstore = Chroma(
            persist_directory=self.db_path,
            embedding_function=self.embeddings
        )
        self.llm = ChatOpenAI(
            model_name=self.model_name,
            openai_api_key=self.openai_api_key,
            temperature=0  # deterministic answers for reproducible evaluation
        )

        # TruLens stack: workspace, feedback provider, groundedness helper.
        self.tru = Tru()
        self.openai = OpenAIProvider(api_key=self.openai_api_key)
        self.groundedness = Groundedness(groundedness_provider=self.openai)

        self._setup_feedback()

    def _setup_feedback(self) -> None:
        """Create the TruLens feedback functions applied to every query.

        NOTE(review): some of these provider methods (completeness /
        consistency / harmfulness *_with_cot_reasons) are not available in
        every trulens_eval release — confirm against the installed version.
        """
        # Question/answer relevance (compares input question to output answer).
        self.qa_relevance = Feedback(
            self.openai.relevance_with_cot_reasons,
            name="问答相关性"
        ).on_input_output()

        # Answer completeness.
        self.answer_completeness = Feedback(
            self.openai.completeness_with_cot_reasons,
            name="答案完整性"
        ).on_output()

        # Groundedness / factual accuracy.
        # NOTE(review): groundedness normally also needs a selector for the
        # retrieved context (.on(...)); with only on_output() the measure has
        # no source text to check against — verify this wiring.
        self.factual_accuracy = Feedback(
            self.groundedness.groundedness_measure_with_cot_reasons,
            name="事实准确性"
        ).on_output()

        # Answer self-consistency.
        self.answer_consistency = Feedback(
            self.openai.consistency_with_cot_reasons,
            name="答案一致性"
        ).on_output()

        # Harmful-content detection.
        self.harmfulness = Feedback(
            self.openai.harmfulness_with_cot_reasons,
            name="有害内容检测"
        ).on_output()

    def _create_rag_chain(self):
        """Assemble the retrieval-augmented chain: retriever -> prompt -> LLM."""
        template = """基于以下上下文回答问题。如果无法从上下文中找到答案，请说明无法回答。

上下文：
{context}

问题：{question}

请提供详细的答案："""

        prompt = ChatPromptTemplate.from_template(template)

        def format_docs(docs):
            # Join retrieved document bodies into one context string.
            return "\n\n".join(doc.page_content for doc in docs)

        rag_chain = (
            {"context": self.vectorstore.as_retriever() | format_docs,
             "question": RunnablePassthrough()}
            | prompt
            | self.llm
        )

        return rag_chain

    async def evaluate_query(self, query: str) -> Dict[str, Any]:
        """Run a single query through the RAG chain and collect feedback scores.

        Args:
            query: The question to evaluate.

        Returns:
            Dict with the query, the model's answer text, and a mapping of
            feedback name -> score.

        Raises:
            Exception: re-raises any failure after logging it.
        """
        try:
            rag_chain = self._create_rag_chain()
            # BUG FIX: the app is a LangChain runnable, so it must be wrapped
            # with TruChain. TruLlama is the wrapper for LlamaIndex apps and
            # cannot instrument a LangChain chain.
            tru_rag = TruChain(
                rag_chain,
                app_id="RAG系统评估",
                feedbacks=[
                    self.qa_relevance,
                    self.answer_completeness,
                    self.factual_accuracy,
                    self.answer_consistency,
                    self.harmfulness
                ]
            )

            # BUG FIX: TruChain recorders are context managers themselves;
            # there is no .record() method. Calls made inside the block are
            # recorded.
            with tru_rag as recording:
                response = rag_chain.invoke(query)

            record = recording.get()
            # Block until every feedback function has produced its result.
            feedback_results = record.wait_for_feedback_results()

            # BUG FIX: ChatOpenAI returns an AIMessage; keep only its text so
            # the report stays JSON-serializable (_save_report uses json.dump).
            answer = getattr(response, "content", response)

            return {
                "query": query,
                "response": answer,
                "metrics": {
                    feedback.name: result.result
                    for feedback, result in feedback_results.items()
                }
            }

        except Exception as e:
            self.logger.error(f"查询评估出错: {str(e)}")
            raise

    async def evaluate_dataset(self, test_queries: List[str]) -> Dict[str, Any]:
        """Evaluate a list of queries and write an aggregated JSON report.

        Args:
            test_queries: Questions to run through the RAG chain.

        Returns:
            The report dict (also saved to disk via _save_report).
        """
        results: List[Dict[str, Any]] = []
        metrics_summary: Dict[str, List[float]] = {}

        for i, query in enumerate(test_queries, 1):
            self.logger.info(f"评估查询 {i}/{len(test_queries)}: {query}")

            result = await self.evaluate_query(query)
            results.append(result)

            for metric, score in result["metrics"].items():
                if score is None:
                    # A feedback function can fail and yield no score; skip it
                    # so the averages below stay well-defined.
                    continue
                metrics_summary.setdefault(metric, []).append(score)

            self.logger.info(f"查询 {i} 评估完成")

        # Per-metric mean across all queries.
        avg_metrics = {
            metric: float(np.mean(scores))
            for metric, scores in metrics_summary.items()
        }

        report = {
            "timestamp": datetime.now().isoformat(),
            "model_name": self.model_name,
            "embedding_model": self.embedding_model,
            "total_queries": len(test_queries),
            "average_metrics": avg_metrics,
            "detailed_results": results
        }

        self._save_report(report)

        return report

    def _save_report(self, report: Dict[str, Any]) -> None:
        """Persist the evaluation report as a timestamped UTF-8 JSON file.

        Raises:
            Exception: re-raises any I/O failure after logging it.
        """
        try:
            os.makedirs(self.save_path, exist_ok=True)

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"trulens_evaluation_{timestamp}.json"
            filepath = os.path.join(self.save_path, filename)

            # ensure_ascii=False keeps the Chinese text readable in the file.
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

            self.logger.info(f"评估报告已保存至: {filepath}")

        except Exception as e:
            self.logger.error(f"保存报告时出错: {str(e)}")
            raise


async def main():
    """Script entry point: evaluate a fixed query set and print averaged metrics."""
    # Both generation and feedback scoring require an OpenAI API key.
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("请设置OPENAI_API_KEY环境变量")

    # Fixed evaluation set of retrieval/RAG knowledge questions.
    test_queries = [
        "向量检索的原理是什么？",
        "如何提高RAG系统的性能？",
        "常见的文本相似度计算方法有哪些？",
        "如何处理长文本检索的问题？",
        "RAG系统中的知识更新机制是什么？"
    ]

    # Reports go under <project_root>/results/trulens_evaluation.
    report_dir = os.path.join(project_root, "results", "trulens_evaluation")
    evaluator = RAGEvaluator(
        openai_api_key=api_key,
        save_path=report_dir
    )

    print("开始TruLens评估...")
    report = await evaluator.evaluate_dataset(test_queries)

    print("\n评估完成！")
    print("\n平均指标:")
    for metric, score in report["average_metrics"].items():
        print(f"{metric}: {score:.4f}")


if __name__ == "__main__":
    # asyncio is only needed when the module is executed as a script.
    import asyncio

    asyncio.run(main())