import sys
import os
import json
import logging
from pathlib import Path
from typing import List, Dict, Any, Union, Optional

# Add the project root (one directory above this file) to sys.path so the
# local `modular_rag` package imported below resolves when this script is
# run directly rather than as an installed package.
project_root = str(Path(__file__).parent.parent)
sys.path.append(project_root)

import numpy as np
from mteb import MTEB
from sentence_transformers import SentenceTransformer
from modular_rag.search.strategies import VectorSearchStrategy
from modular_rag.prediction.predictor import PredictionModule
from modular_rag.fusion.fusion import FusionModule


class RAGEmbedder:
    """Text embedder for the RAG system, exposing the interface MTEB expects
    (``encode`` / ``encode_queries`` / ``encode_corpus``)."""

    def __init__(self, model_name: str = "BAAI/bge-large-zh-v1.5"):
        """Load the embedding model and the RAG pipeline modules.

        Args:
            model_name: Hugging Face model id of the sentence-transformer
                embedding model.
        """
        self.model = SentenceTransformer(model_name)
        self.search_strategy = VectorSearchStrategy()
        self.prediction_module = PredictionModule()
        self.fusion_module = FusionModule()

        # Set up logging.
        self.logger = logging.getLogger(__name__)
        self._setup_logging()

    def _setup_logging(self) -> None:
        """Configure root logging (no-op if the root logger already has handlers)."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )

    def encode(self, sentences: List[str],
               batch_size: int = 32,
               show_progress_bar: bool = False,
               convert_to_numpy: bool = True,
               normalize_embeddings: bool = True,
               **kwargs) -> Union[np.ndarray, List[Any]]:
        """Encode texts into dense vector representations.

        Each sentence is first run through the prediction module
        (intent analysis -> context generation -> query rewriting) and the
        first rewritten query is embedded in place of the raw text.

        Args:
            sentences: Texts to encode.
            batch_size: Batch size forwarded to SentenceTransformer.
            show_progress_bar: Whether to show an encoding progress bar.
            convert_to_numpy: Return a numpy array instead of tensors.
            normalize_embeddings: L2-normalize the output vectors.

        Returns:
            Embeddings as a numpy array (or a list, depending on the flags
            forwarded to SentenceTransformer).

        Raises:
            Exception: any pipeline error is logged and re-raised.
        """
        try:
            processed_sentences = []
            for sentence in sentences:
                # Analyze the query intent.
                intent = self.prediction_module.analyze_intent(sentence)

                # Generate context for the query.
                context = self.prediction_module.generate_context(sentence, intent)

                # Rewrite the query using the generated context.
                rewritten_queries = self.prediction_module.rewrite_query(sentence, context)

                # Use the first rewritten query; fall back to the original
                # sentence if the rewriter returned nothing (the original
                # code raised IndexError on an empty rewrite list).
                processed_sentences.append(
                    rewritten_queries[0] if rewritten_queries else sentence
                )

            # Embed with the underlying SentenceTransformer model.
            embeddings = self.model.encode(
                processed_sentences,
                batch_size=batch_size,
                show_progress_bar=show_progress_bar,
                convert_to_numpy=convert_to_numpy,
                normalize_embeddings=normalize_embeddings
            )

            return embeddings

        except Exception as e:
            self.logger.error(f"编码过程出错: {str(e)}")
            raise

    def encode_queries(self, queries: List[str], **kwargs) -> np.ndarray:
        """Encode query texts (MTEB retrieval hook)."""
        return self.encode(queries, **kwargs)

    def encode_corpus(self, corpus: List[str], **kwargs) -> np.ndarray:
        """Encode corpus documents (MTEB retrieval hook).

        NOTE(review): MTEB retrieval tasks may pass corpus entries as dicts
        with 'title'/'text' keys rather than plain strings — confirm against
        the installed mteb version before relying on List[str] here.
        """
        return self.encode(corpus, **kwargs)


class MTEBEvaluator:
    """Run an MTEB benchmark over the RAG embedder and persist the results."""

    def __init__(self,
                 model_name: str = "BAAI/bge-large-zh-v1.5",
                 task_types: Optional[List[str]] = None,
                 save_path: str = "results/mteb_evaluation"):
        """Create the evaluator.

        Args:
            model_name: Embedding model id passed to :class:`RAGEmbedder`.
            task_types: MTEB task types to evaluate; defaults to the five
                standard categories below.
            save_path: Directory where results and the report are written.
        """
        self.model_name = model_name
        self.embedder = RAGEmbedder(model_name)
        self.task_types = task_types or [
            "Retrieval",
            "Reranking",
            "STS",
            "Clustering",
            "Classification"
        ]
        self.save_path = save_path

        # Module-level logger; handlers are configured by RAGEmbedder.
        self.logger = logging.getLogger(__name__)

    def evaluate(self) -> Dict[str, Any]:
        """Run the MTEB evaluation and save the results.

        Returns:
            The raw results mapping returned by ``MTEB.run``.

        Raises:
            Exception: any evaluation error is logged and re-raised.
        """
        try:
            # Create the MTEB benchmark. Fix: the values in self.task_types
            # are task *types* ("Retrieval", "STS", ...), not task names, so
            # they must go to the `task_types` filter — passing them as
            # `tasks` selects no tasks because no task is named "Retrieval".
            evaluation = MTEB(
                task_types=self.task_types,
                task_langs=["zh"]  # restrict to Chinese tasks
            )

            # Run the evaluation on both dev and test splits.
            results = evaluation.run(
                self.embedder,
                output_folder=self.save_path,
                eval_splits=["dev", "test"]
            )

            # Persist raw results and the summary report.
            self._save_results(results)

            return results

        except Exception as e:
            self.logger.error(f"评估过程出错: {str(e)}")
            raise

    @staticmethod
    def _mean_numeric(task_result: Dict[str, Any]) -> Optional[float]:
        """Mean of the numeric metric values in one task's result dict.

        Returns None when the dict contains no numeric values, so callers
        can skip tasks that produced no usable scores.
        """
        scores = [v for v in task_result.values() if isinstance(v, (int, float))]
        return float(np.mean(scores)) if scores else None

    def _save_results(self, results: Dict[str, Any]) -> None:
        """Write the full results JSON and generate the summary report.

        Raises:
            Exception: any I/O error is logged and re-raised.
        """
        try:
            # Ensure the output directory exists.
            os.makedirs(self.save_path, exist_ok=True)

            # Save the complete raw results.
            output_path = os.path.join(self.save_path, "mteb_results.json")
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(results, f, ensure_ascii=False, indent=2)

            # Generate the per-task-type summary report.
            self._generate_report(results)

        except Exception as e:
            self.logger.error(f"保存结果时出错: {str(e)}")
            raise

    def _generate_report(self, results: Dict[str, Any]) -> None:
        """Build and save an evaluation report with per-task-type summaries.

        For each configured task type, every task whose name contains the
        type (case-insensitive) contributes the mean of its numeric metrics;
        the report records the mean and std of those per-task means.

        Raises:
            Exception: any error is logged and re-raised.
        """
        try:
            report = {
                "model_name": self.model_name,
                "task_types": self.task_types,
                "summary": {},
                # Keep a verbatim copy of every task's result.
                "detailed_results": dict(results)
            }

            # Aggregate per-task mean scores for each task type.
            for task_type in self.task_types:
                needle = task_type.lower()
                task_scores = []
                for task_name, task_result in results.items():
                    if needle in task_name.lower():
                        mean_score = self._mean_numeric(task_result)
                        if mean_score is not None:
                            task_scores.append(mean_score)

                if task_scores:
                    report["summary"][task_type] = {
                        "mean_score": float(np.mean(task_scores)),
                        "std_score": float(np.std(task_scores))
                    }

            # Save the report next to the raw results.
            report_path = os.path.join(self.save_path, "evaluation_report.json")
            with open(report_path, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

        except Exception as e:
            self.logger.error(f"生成报告时出错: {str(e)}")
            raise


def main():
    """Entry point: run the MTEB evaluation and print per-type averages."""
    # Evaluation configuration.
    model_name = "BAAI/bge-large-zh-v1.5"
    task_types = ["Retrieval", "Reranking", "STS"]
    save_path = os.path.join(project_root, "results", "mteb_evaluation")

    # Build the evaluator and run the benchmark.
    evaluator = MTEBEvaluator(
        model_name=model_name,
        task_types=task_types,
        save_path=save_path,
    )

    print("开始MTEB评估...")
    results = evaluator.evaluate()

    print("\n评估完成！")
    print(f"结果已保存到: {save_path}")

    # Print the flat average of all numeric metrics per task type
    # (task names matched case-insensitively by substring).
    for task_type in task_types:
        needle = task_type.lower()
        scores = [
            value
            for task_name, task_result in results.items()
            if needle in task_name.lower()
            for value in task_result.values()
            if isinstance(value, (int, float))
        ]
        if scores:
            mean_score = np.mean(scores)
            print(f"\n{task_type} 任务平均分数: {mean_score:.4f}")


if __name__ == "__main__":
    main()