from typing import List, Dict, Any, Optional
from data_processor import DataProcessor
from database import db_manager
from config import config
import numpy as np
from rank_bm25 import BM25Okapi

class Retriever:
    """Retrieval module.

    Lazily initializes the underlying ``DataProcessor`` / database state and
    dispatches queries to one of several search strategies (hybrid, TF-IDF,
    BM25, vector, ...).
    """

    def __init__(self):
        self.data_processor = DataProcessor()
        # Set to True once models are built; search() triggers lazy init.
        self.initialized = False

    def initialize(self):
        """Initialize the retriever (idempotent).

        Validates the OneAPI service, loads documents from the database
        (seeding it with generated test data when empty), converts stored
        embeddings back to numpy arrays, and builds the retrieval models.

        Raises:
            RuntimeError: if the OneAPI service validation fails.
        """
        if self.initialized:
            return

        # Fail fast if the embedding/LLM gateway is unreachable.
        # RuntimeError (was a bare Exception) is still caught by callers
        # that handle Exception.
        if not self.data_processor.validate_oneapi_service():
            raise RuntimeError("OneAPI服务验证失败")

        doc_count = db_manager.get_document_count()

        if doc_count == 0:
            print("数据库为空，创建测试数据...")
            # Seed the collection with generated test documents.
            documents = self.data_processor.create_test_data()
            db_manager.clear_collection()
            db_manager.insert_documents(documents)
        else:
            print(f"从数据库加载 {doc_count} 个文档...")
            documents = db_manager.get_all_documents()
            # Stored embeddings come back as plain lists; restore ndarrays
            # so vector search can do math on them directly.
            for doc in documents:
                if "embedding" in doc:
                    doc["embedding"] = np.array(doc["embedding"])

        self.data_processor.initialize_retrieval_models(documents)
        self.initialized = True
        print("✅ 检索器初始化完成")

    def search(self, query: str, search_type: str = "hybrid", article_title: str = "", k: int = 0) -> List[Dict[str, Any]]:
        """Run a search and return matching documents.

        Args:
            query: Free-text query.
            search_type: One of "hybrid", "tfidf_expansion",
                "improved_keyword", "tfidf", "bm25", "vector".
            article_title: Optional exact-match title filter.
            k: Max number of results; 0 means use ``config.TOP_K``.

        Raises:
            ValueError: for an unknown ``search_type``.
        """
        if not self.initialized:
            self.initialize()

        if k == 0:
            k = config.TOP_K

        # Dispatch table keeps the supported strategies in one place.
        handlers = {
            "hybrid": self.data_processor.hybrid_search,
            "tfidf_expansion": self.data_processor.tfidf_expansion_vector_search,
            "improved_keyword": self.data_processor.improved_keyword_search,
            "tfidf": self.data_processor.tfidf_search,
            "bm25": self._bm25_search,
            "vector": self._vector_search,
        }
        handler = handlers.get(search_type)
        if handler is None:
            raise ValueError(f"不支持的检索类型: {search_type}")
        return handler(query, article_title, k)

    def _filter_docs_by_title(self, article_title: str) -> List[Dict[str, Any]]:
        """Return all docs, or only those whose title matches exactly."""
        if not article_title:
            return self.data_processor.all_docs
        return [
            doc for doc in self.data_processor.all_docs
            if doc.get("title", "") == article_title
        ]

    def _bm25_search(self, query: str, article_title: str = "", k: int = 0) -> List[Dict[str, Any]]:
        """BM25 search over (optionally title-filtered) documents.

        Returns every document with a positive BM25 score, sorted by score
        descending. ``k > 0`` truncates the result; ``k <= 0`` means no limit.
        """
        print(f"BM25检索查询: '{query}'，文章标题: '{article_title}'")

        filtered_docs = self._filter_docs_by_title(article_title)
        if article_title and not filtered_docs:
            print(f"未找到标题为 '{article_title}' 的文档")
            return []

        print(f"筛选后文档数量: {len(filtered_docs)}")

        # Rebuild a BM25 model over just the filtered subset so the IDF
        # statistics reflect the documents actually being searched.
        tokenized_corpus_filtered = [
            doc["tokenized_content"] if "tokenized_content" in doc
            else self.data_processor.tokenize_text(doc["content"])
            for doc in filtered_docs
        ]

        if not tokenized_corpus_filtered:
            print("警告: tokenized_corpus为空")
            return []

        # Empty token lists only yield zero scores; warn so data issues surface.
        for i, tokens in enumerate(tokenized_corpus_filtered):
            if not tokens:
                print(f"警告: 文档 {i} 的tokenized_content为空")

        bm25_filtered = BM25Okapi(tokenized_corpus_filtered)

        tokenized_query = self.data_processor.tokenize_text(query)
        print(f"查询分词结果: {tokenized_query}")

        if not tokenized_query:
            print("警告: 查询分词结果为空")
            return []

        bm25_scores = bm25_filtered.get_scores(tokenized_query)
        print(f"BM25原始分数: {bm25_scores}")

        # Keep every positive-score document, not just the top k.
        results = []
        for idx, score in enumerate(bm25_scores):
            if score > 0:
                doc = filtered_docs[idx].copy()
                doc["bm25_score"] = score
                results.append(doc)

        results.sort(key=lambda x: x["bm25_score"], reverse=True)

        if k > 0:
            results = results[:k]

        print(f"BM25检索完成，找到 {len(results)} 个相关文档")
        for i, result in enumerate(results[:3]):  # only show the top 3
            print(f"  第{i+1}名: 分数={result['bm25_score']:.4f}, 标题={result['title']}")

        return results

    def _vector_search(self, query: str, article_title: str = "", k: int = 0) -> List[Dict[str, Any]]:
        """Cosine-similarity vector search over (optionally filtered) docs.

        Fixes vs. the previous version:
        - ``k <= 0`` now means "no limit" (consistent with ``_bm25_search``);
          previously ``k == 0`` sliced the candidate list to empty and the
          method always returned [].
        - Guards against zero-norm vectors to avoid division by zero.
        """
        filtered_docs = self._filter_docs_by_title(article_title)
        if article_title and not filtered_docs:
            return []

        query_vec = np.asarray(self.data_processor.generate_embedding(query), dtype=float)
        query_norm = np.linalg.norm(query_vec)

        similarities = []
        for doc in filtered_docs:
            doc_vec = np.asarray(doc["embedding"], dtype=float)
            denom = query_norm * np.linalg.norm(doc_vec)
            # A zero-norm vector has no direction; define its similarity as 0.
            similarities.append(float(np.dot(query_vec, doc_vec) / denom) if denom else 0.0)

        ranked = np.argsort(similarities)[::-1]
        if k > 0:
            ranked = ranked[:k]

        results = []
        for idx in ranked:
            if similarities[idx] > 0:
                doc = filtered_docs[idx].copy()
                doc["similarity_score"] = similarities[idx]
                results.append(doc)

        return results

    def get_context_from_results(self, results: List[Dict[str, Any]], max_length: int = 2000) -> str:
        """Build an LLM context string from search results.

        Concatenates "title / content / category" blocks (joined by blank
        lines) until adding another block would exceed ``max_length``.
        """
        print(f"🔍 构建上下文:")
        print(f"   检索结果数量: {len(results)}")
        print(f"   最大长度限制: {max_length}")

        context_parts = []
        current_length = 0

        for i, result in enumerate(results):
            # Results may be raw docs or {"doc": ..., <scores>} wrappers.
            if isinstance(result, dict) and "doc" in result:
                doc = result["doc"]
                print(f"   结果 {i+1}: 使用doc字段")
            else:
                doc = result
                print(f"   结果 {i+1}: 直接使用result")

            print(f"   结果 {i+1} 标题: {doc.get('title', 'N/A')}")
            print(f"   结果 {i+1} 内容长度: {len(doc.get('content', ''))}")

            # .get() (was doc['...']) so a doc missing a field no longer
            # raises KeyError mid-build.
            content = f"标题: {doc.get('title', '')}\n内容: {doc.get('content', '')}\n分类: {doc.get('category', '')}\n"
            content_length = len(content)

            if current_length + content_length > max_length:
                print(f"   达到长度限制，停止添加更多内容")
                break

            context_parts.append(content)
            current_length += content_length
            print(f"   当前总长度: {current_length}")

        final_context = "\n".join(context_parts)
        print(f"✅ 上下文构建完成，最终长度: {len(final_context)} 字符")

        return final_context

    def format_search_results(self, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Format raw search results into rank/title/content/category/scores dicts.

        Wrapped results ({"doc": ..., <scores>}) expose the full set of
        component scores; plain results expose the basic ones. Score keys
        and rounding match the original output exactly.
        """
        # (result key, display label, rounding digits) for wrapped results.
        wrapped_fields = (
            ("score", "混合分数", 4),
            ("bm25_score", "BM25分数", 4),
            ("similarity_score", "向量相似度", 4),
            ("enhanced_score", "增强分数", 4),
            ("position_weight", "位置权重", 2),
            ("title_match", "标题匹配", 2),
            ("term_freq_boost", "词频增强", 2),
            ("expanded_score", "扩展分数", 4),
            ("original_score", "原始分数", 4),
            ("tfidf_score", "TF-IDF分数", 4),
        )
        # Same, for plain (unwrapped) results.
        plain_fields = (
            ("score", "综合分数", 4),
            ("bm25_score", "BM25分数", 4),
            ("similarity_score", "相似度", 4),
            ("tfidf_score", "TF-IDF分数", 4),
        )

        formatted_results = []
        for i, result in enumerate(results):
            if isinstance(result, dict) and "doc" in result:
                doc = result["doc"]
                fields = wrapped_fields
            else:
                doc = result
                fields = plain_fields

            scores = {
                label: round(result[key], digits)
                for key, label, digits in fields
                if key in result
            }

            formatted_results.append({
                "rank": i + 1,
                "title": doc.get("title", ""),
                "content": doc.get("content", ""),
                "category": doc.get("category", ""),
                "scores": scores,
            })

        return formatted_results

# Global retriever instance shared by the rest of the application.
retriever = Retriever() 