import sys
import os
import json
import asyncio
from typing import List, Dict, Any
from pathlib import Path

# Add the project root to sys.path so the `modular_rag` package imports resolve
# when this script is run directly from its own directory.
project_root = str(Path(__file__).parent.parent)
sys.path.append(project_root)

import yaml
from modular_rag.base import SearchResult, Document
from modular_rag.search.strategies import VectorSearchStrategy
from modular_rag.prediction.predictor import PredictionModule
from modular_rag.fusion.fusion import FusionModule


class RAGEvaluator:
    """Evaluate a modular RAG pipeline with the MRR (Mean Reciprocal Rank) metric.

    Wires together the search, prediction and fusion modules from a YAML
    config, runs each test query through the full pipeline (intent analysis,
    context generation, query rewriting, search, fusion, reranking), and
    scores the final ranking against the labelled relevant documents.
    """

    def __init__(self, config_path: str):
        """Load the configuration and initialize all pipeline modules.

        Args:
            config_path: Path to the YAML configuration file.
        """
        self.config = self._load_config(config_path)
        self.search_strategy = VectorSearchStrategy()
        self.prediction_module = PredictionModule()
        self.fusion_module = FusionModule()

        # Hand each module its section of the loaded config.
        self._initialize_modules()

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Parse the YAML configuration file."""
        with open(config_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)

    def _initialize_modules(self) -> None:
        """Initialize each module with its own section of the configuration."""
        self.search_strategy.initialize(self.config['search']['vector_store'])
        self.prediction_module.initialize(self.config['prediction'])
        self.fusion_module.initialize(self.config['fusion'])

    def _load_test_data(self, data_path: str) -> List[Dict[str, Any]]:
        """Load the JSON test dataset.

        Each item is expected to carry 'query' and 'relevant_docs' keys
        (see evaluate_dataset).
        """
        with open(data_path, 'r', encoding='utf-8') as f:
            return json.load(f)

    async def evaluate_single_query(self, query: str, relevant_docs: List[str]) -> float:
        """Run one query through the full pipeline and return its reciprocal rank.

        Args:
            query: The user query to evaluate.
            relevant_docs: Contents of the documents labelled relevant for
                this query (matched against ``doc.content``).

        Returns:
            1/rank of the first relevant document in the final ranking, or
            0.0 if no relevant document was retrieved.
        """
        # 1. Analyze the query intent.
        intent = await self.prediction_module.analyze_intent(query)

        # 2. Generate context for the query.
        context = await self.prediction_module.generate_context(query, intent)

        # 3. Rewrite the query into one or more search queries.
        rewritten_queries = await self.prediction_module.rewrite_query(query, context)

        # 4. Run the independent searches concurrently instead of one at a
        #    time; asyncio.gather preserves the input order of the results.
        search_results = list(await asyncio.gather(
            *(self.search_strategy.search(q) for q in rewritten_queries)
        ))

        # 5. Merge the per-query result lists.
        merged_results = await self.fusion_module.merge_results(search_results)

        # 6. Rerank the merged results against the original query.
        final_results = await self.fusion_module.rerank(query, merged_results)

        # 7. Reciprocal rank of the first relevant hit.
        relevant_set = set(relevant_docs)  # O(1) membership per ranked document
        for rank, doc in enumerate(final_results.documents, 1):
            if doc.content in relevant_set:
                return 1.0 / rank
        return 0.0

    async def evaluate_dataset(self, data_path: str) -> Dict[str, float]:
        """Evaluate every query in the dataset.

        Args:
            data_path: Path to a JSON list of
                ``{'query': str, 'relevant_docs': [str, ...]}`` items.

        Returns:
            Per-query entries keyed by query text, plus an 'average_mrr' key
            holding the mean reciprocal rank over the whole dataset.

        Raises:
            ValueError: If the dataset is empty (the average is undefined;
                the original code raised ZeroDivisionError here).
        """
        test_data = self._load_test_data(data_path)
        if not test_data:
            raise ValueError(f"Test dataset is empty: {data_path}")

        total_mrr = 0.0
        results = {}

        for i, item in enumerate(test_data, 1):
            query = item['query']
            relevant_docs = item['relevant_docs']

            # Score this query through the full pipeline.
            mrr = await self.evaluate_single_query(query, relevant_docs)
            total_mrr += mrr

            results[query] = {
                'mrr': mrr,
                'relevant_docs': relevant_docs
            }

            # Progress report for long-running evaluations.
            print(f"Progress: {i}/{len(test_data)} queries evaluated")
            print(f"Query: {query}")
            print(f"MRR: {mrr:.4f}")
            print("-" * 50)

        # NOTE(review): a query literally named 'average_mrr' would collide
        # with this summary key; callers (main) depend on this schema.
        results['average_mrr'] = total_mrr / len(test_data)

        return results

    def save_results(self, results: Dict[str, Any], output_path: str) -> None:
        """Write the evaluation results to a UTF-8 JSON file, human-readable."""
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(results, f, ensure_ascii=False, indent=2)


async def main():
    """Run the full MRR evaluation end to end and persist the results."""
    # All paths are resolved relative to the project root.
    join = os.path.join
    config_path = join(project_root, 'config', 'config.yaml')           # pipeline config
    data_path = join(project_root, 'data', 'test_queries.json')         # labelled queries
    output_path = join(project_root, 'results', 'mrr_evaluation.json')  # report file

    # Build the evaluator and score the whole dataset.
    evaluator = RAGEvaluator(config_path)
    results = await evaluator.evaluate_dataset(data_path)

    # Make sure the output directory exists, then persist the report.
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    evaluator.save_results(results, output_path)

    print("\nEvaluation Complete!")
    print(f"Average MRR: {results['average_mrr']:.4f}")


# Script entry point: drive the async main() on a fresh event loop.
if __name__ == '__main__':
    asyncio.run(main()) 