"""
检索模块 - 向量检索和重排序
"""
import logging
import time
import json
from typing import List, Dict, Any, Optional
from datetime import datetime

from ..config.settings import RETRIEVAL_CONFIG
from ..core.vectorizer import vectorizer
from ..core.milvus_client import milvus_client
from ..config.database import db_manager, SearchHistory, VectorChunk

logger = logging.getLogger(__name__)

class Retriever:
    """Semantic retriever backed by Milvus with a database keyword fallback.

    Responsibilities:
    - Embed a query and run a similarity search against the Milvus vector
      store, filtering by a similarity threshold and optionally re-ranking.
    - Serve and populate a per-query search-result cache.
    - Fall back to a LIKE-based keyword search on the relational database
      when Milvus is unavailable or the vector search fails.
    - Persist search history (best-effort) and expose history/statistics.
    """

    def __init__(self):
        # All tunables come from the project-wide retrieval configuration.
        self.top_k = RETRIEVAL_CONFIG["top_k"]
        self.rerank_top_k = RETRIEVAL_CONFIG["rerank_top_k"]
        self.similarity_threshold = RETRIEVAL_CONFIG["similarity_threshold"]
        self.enable_rerank = RETRIEVAL_CONFIG["enable_rerank"]
        self.enable_hybrid_search = RETRIEVAL_CONFIG.get("enable_hybrid_search", False)
        logger.info(f"初始化检索器: top_k={self.top_k}, similarity_threshold={self.similarity_threshold}")

    def search(self, query: str, top_k: int = None, filter_expr: str = None,
               user_id: str = None, session_id: str = None) -> Dict[str, Any]:
        """Run a search for *query* and return a result envelope.

        Args:
            query: Query text; must be non-empty after stripping.
            top_k: Number of results to return (defaults to ``self.top_k``).
            filter_expr: Optional Milvus filter expression.
            user_id: Optional user id recorded in search history.
            session_id: Optional session id recorded in search history.

        Returns:
            Dict with ``success``, ``results``, timing and provenance fields;
            ``source`` is one of ``"cache"``, ``"vector"``, ``"database"``,
            ``"database_fallback"``. On failure ``success`` is False and
            ``error`` carries the message.
        """
        start_time = time.time()
        logger.info(f"开始执行检索: query='{query}', top_k={top_k}")

        try:
            # Guard clause: reject empty / whitespace-only queries.
            if not query or not query.strip():
                logger.warning("查询文本为空")
                return {
                    "success": False,
                    "error": "查询文本不能为空",
                    "results": [],
                    "search_time": 0
                }

            # Fall back to the configured default result count.
            if top_k is None:
                top_k = self.top_k

            logger.info(f"搜索参数: query='{query}', top_k={top_k}, filter_expr={filter_expr}")

            # Check the result cache first (function-scope import mirrors the
            # original; presumably avoids an import cycle — TODO confirm).
            from ..utils.cache_manager import vector_cache
            cached_results = vector_cache.get_search_results(query, top_k)
            if cached_results is not None:
                logger.info(f"从缓存获取搜索结果: {len(cached_results)} 条")
                return {
                    "success": True,
                    "query": query,
                    "results": cached_results,
                    "total_found": len(cached_results),
                    "total_returned": len(cached_results),
                    "search_time": 0.001,  # cache hit: effectively instantaneous
                    "similarity_threshold": self.similarity_threshold,
                    "cached": True,
                    "source": "cache",  # results came from the cache
                    "milvus_available": milvus_client.connection is not None
                }

            # Determine whether the vector store can be used at all.
            milvus_available = milvus_client.connection is not None
            logger.info(f"Milvus可用性: {milvus_available}")

            if milvus_available:
                try:
                    logger.info(f"开始执行向量搜索: query='{query}', top_k={top_k}")

                    # 1. Embed the query text.
                    query_embedding = vectorizer.encode_text(query)
                    logger.info(f"查询文本 '{query}' 已向量化，维度: {len(query_embedding)}")

                    # 2. Vector similarity search in Milvus.
                    search_results = milvus_client.search_vectors(
                        query_embedding=query_embedding.tolist(),
                        top_k=top_k,
                        filter_expr=filter_expr
                    )
                    logger.info(f"从Milvus检索到 {len(search_results)} 条结果")
                    if search_results:
                        logger.info(f"最高分结果: Score={search_results[0]['score']:.4f}")
                    else:
                        logger.info("未从Milvus检索到任何结果")

                    # 3. Drop hits below the similarity threshold.
                    filtered_results = []
                    for result in search_results:
                        logger.debug(f"检查结果: Score={result['score']:.4f}, Threshold={self.similarity_threshold}")
                        if result["score"] >= self.similarity_threshold:
                            filtered_results.append(result)
                    logger.info(f"过滤后剩余 {len(filtered_results)} 条结果 (阈值: {self.similarity_threshold})")
                    if filtered_results:
                        logger.info(f"过滤后最高分结果: Score={filtered_results[0]['score']:.4f}")
                    else:
                        logger.info("过滤后没有符合条件的结果")

                    # 4. Optional re-ranking step (truncates to rerank_top_k).
                    if self.enable_rerank and filtered_results:
                        reranked_results = self._rerank_results(query, filtered_results)
                        final_results = reranked_results[:self.rerank_top_k]
                        logger.debug(f"重排序后取前 {len(final_results)} 条结果")
                    else:
                        final_results = filtered_results[:top_k]
                        logger.debug(f"直接取前 {len(final_results)} 条结果")

                    # 5. Shape the hits into the public result schema.
                    formatted_results = self._format_results(final_results)
                    logger.debug(f"格式化后结果数量: {len(formatted_results)}")

                    source = "vector"  # results came from the vector store
                except Exception as e:
                    logger.error(f"向量检索失败: {e}")
                    logger.exception(e)
                    # Vector search failed: degrade to a keyword search on the DB.
                    logger.info("向量检索失败，使用数据库关键词检索作为备选方案")
                    formatted_results = self._keyword_search_from_database(query, top_k)
                    source = "database_fallback"
            else:
                # Milvus is down: go straight to the database keyword search.
                logger.info("Milvus不可用，使用数据库关键词检索")
                formatted_results = self._keyword_search_from_database(query, top_k)
                source = "database"

            # 6. Cache the results for subsequent identical queries.
            vector_cache.set_search_results(query, top_k, formatted_results)

            # 7. Record this search in the history table (best-effort).
            search_time = time.time() - start_time
            self._save_search_history(
                query=query,
                result_count=len(formatted_results),
                search_time=search_time,
                user_id=user_id,
                session_id=session_id
            )

            logger.info(f"最终返回结果数量: {len(formatted_results)}")

            return {
                "success": True,
                "query": query,
                "results": formatted_results,
                "total_found": len(formatted_results),
                "total_returned": len(formatted_results),
                "search_time": search_time,
                "similarity_threshold": self.similarity_threshold,
                "cached": False,
                "source": source,  # provenance of the results
                "milvus_available": milvus_available
            }

        except Exception as e:
            logger.error(f"检索失败: {e}")
            logger.exception(e)
            return {
                "success": False,
                "error": str(e),
                "results": [],
                "search_time": time.time() - start_time
            }

    def _rerank_results(self, query: str, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Re-rank *results* for *query*.

        Currently a plain score-descending sort; a local rerank model can be
        plugged in here later. On any failure the input is returned unchanged.
        """
        try:
            return sorted(results, key=lambda x: x["score"], reverse=True)
        except Exception as e:
            logger.error(f"重排序失败: {e}")
            return results

    def _format_results(self, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Convert raw Milvus hits into the public result schema.

        Malformed entries are skipped entirely; a malformed metadata JSON
        string degrades to an empty dict rather than dropping the whole hit.
        """
        formatted_results = []

        for i, result in enumerate(results):
            try:
                # Metadata is stored as a JSON string; tolerate bad payloads.
                metadata = {}
                if result.get("metadata"):
                    try:
                        metadata = json.loads(result["metadata"])
                    except (json.JSONDecodeError, TypeError):  # was a bare except
                        metadata = {}

                formatted_results.append({
                    "rank": i + 1,
                    "id": result["id"],
                    "score": float(result["score"]),
                    "content": result["content"],
                    "file_id": result["file_id"],
                    "chunk_id": result["chunk_id"],
                    "chunk_index": result["chunk_index"],
                    "file_type": result["file_type"],
                    "filename": result["filename"],
                    "metadata": metadata,
                    "created_time": result["created_time"]
                })

            except Exception as e:
                logger.error(f"格式化结果失败: {e}")
                continue

        return formatted_results

    def _save_search_history(self, query: str, result_count: int, search_time: float,
                           user_id: str = None, session_id: str = None):
        """Persist one row of search history; failures are logged, never raised."""
        session = None  # predefine so the except handler never hits an unbound name
        try:
            session = db_manager.get_session()

            search_record = SearchHistory(
                # NOTE(review): hash(query) varies between interpreter runs
                # (PYTHONHASHSEED), so ids are only unique-ish, not stable.
                search_id=f"search_{int(time.time())}_{hash(query) % 10000}",
                query=query,
                query_type="semantic",
                result_count=result_count,
                search_time=search_time,
                user_id=user_id,
                session_id=session_id,
                created_time=datetime.now()
            )

            session.add(search_record)
            session.commit()
            session.close()

            logger.info(f"保存检索历史: query='{query}', result_count={result_count}, search_time={search_time:.3f}s")

        except Exception as e:
            logger.error(f"保存检索历史失败: {e}")
            # Original referenced `session` before assignment when
            # get_session() itself failed; guarded by the None default above.
            if session is not None:
                session.rollback()
                session.close()

    def _keyword_search_from_database(self, query: str, top_k: int) -> List[Dict[str, Any]]:
        """Keyword search on the relational DB (fallback when Milvus is unusable).

        Args:
            query: Query text, matched with a simple ``LIKE %query%``.
            top_k: Maximum number of rows to return.

        Returns:
            Result dicts in the same schema as the vector path; empty list
            on failure.
        """
        session = None
        try:
            logger.info(f"执行数据库关键词搜索: query='{query}', top_k={top_k}")
            session = db_manager.get_session()

            # Simple substring match; no ranking signal is available here.
            results = session.query(VectorChunk).filter(
                VectorChunk.content.like(f"%{query}%")
            ).limit(top_k).all()

            formatted_results = []
            for i, chunk in enumerate(results):
                try:
                    # Metadata is stored as a JSON string; tolerate bad payloads.
                    metadata = {}
                    if chunk.metadata:
                        try:
                            metadata = json.loads(chunk.metadata)
                        except (json.JSONDecodeError, TypeError):  # was a bare except
                            metadata = {}

                    formatted_results.append({
                        "rank": i + 1,
                        "id": chunk.id,
                        "score": 0.9,  # fixed high score: keyword hits carry no similarity
                        "content": chunk.content,
                        "file_id": chunk.file_id,
                        "chunk_id": chunk.chunk_id,
                        "chunk_index": chunk.chunk_index,
                        "file_type": chunk.file_type if chunk.file_type else "txt",
                        "filename": chunk.filename if chunk.filename else "unknown",
                        "metadata": metadata,
                        "created_time": int(chunk.created_time.timestamp()) if chunk.created_time else 0
                    })

                except Exception as e:
                    logger.error(f"格式化结果失败: {e}")
                    continue

            logger.info(f"数据库关键词搜索完成，返回 {len(formatted_results)} 条结果")
            return formatted_results

        except Exception as e:
            logger.error(f"数据库关键词检索失败: {e}")
            return []
        finally:
            # Original leaked the session on the error path; always close it.
            if session is not None:
                session.close()

    def hybrid_search(self, query: str, top_k: int = None, filter_expr: str = None) -> Dict[str, Any]:
        """Hybrid (semantic + keyword) search.

        Currently delegates to :meth:`search`; keyword retrieval and result
        fusion are placeholders for future extension.

        Args:
            query: Query text.
            top_k: Number of results to return.
            filter_expr: Optional Milvus filter expression.

        Returns:
            Same envelope as :meth:`search`.
        """
        try:
            if not self.enable_hybrid_search:
                return self.search(query, top_k, filter_expr)

            # 1. Semantic retrieval.
            semantic_results = self.search(query, top_k, filter_expr)

            # 2. Keyword retrieval (future extension point).
            # keyword_results = self._keyword_search(query, top_k, filter_expr)

            # 3. Result fusion — for now only the semantic results are returned.
            return semantic_results

        except Exception as e:
            logger.error(f"混合检索失败: {e}")
            return {
                "success": False,
                "error": str(e),
                "results": [],
                "search_time": 0
            }

    def batch_search(self, queries: List[str], top_k: int = None) -> List[Dict[str, Any]]:
        """Run :meth:`search` for each query; one entry per input, in order.

        A per-query failure is recorded as a failed result entry rather than
        aborting the whole batch.
        """
        results = []

        for query in queries:
            try:
                result = self.search(query, top_k)
                results.append({
                    "query": query,
                    "result": result
                })
            except Exception as e:
                logger.error(f"批量检索失败: {query}, 错误: {e}")
                results.append({
                    "query": query,
                    "result": {
                        "success": False,
                        "error": str(e),
                        "results": []
                    }
                })

        return results

    def get_search_history(self, user_id: str = None, limit: int = 100) -> List[Dict[str, Any]]:
        """Return recent search-history records, newest first.

        Args:
            user_id: Optional filter to one user's history.
            limit: Maximum number of records to return.

        Returns:
            List of plain dicts; empty list on failure.
        """
        session = None  # predefine so the except handler never hits an unbound name
        try:
            session = db_manager.get_session()

            query = session.query(SearchHistory)

            if user_id:
                query = query.filter(SearchHistory.user_id == user_id)

            history_records = query.order_by(SearchHistory.created_time.desc()).limit(limit).all()

            history_list = []
            for record in history_records:
                history_list.append({
                    "search_id": record.search_id,
                    "query": record.query,
                    "query_type": record.query_type,
                    "result_count": record.result_count,
                    "search_time": record.search_time,
                    "created_time": record.created_time.isoformat() if record.created_time else None,
                    "user_id": record.user_id,
                    "session_id": record.session_id
                })

            session.close()

            return history_list

        except Exception as e:
            logger.error(f"获取检索历史失败: {e}")
            if session is not None:
                session.close()
            return []

    def get_search_stats(self) -> Dict[str, Any]:
        """Aggregate search statistics: total count, average latency, top queries."""
        session = None  # predefine so the except handler never hits an unbound name
        try:
            session = db_manager.get_session()

            # Total number of recorded searches.
            total_searches = session.query(SearchHistory).count()

            # Average search time over rows where it was recorded.
            recorded_times = session.query(SearchHistory.search_time).filter(
                SearchHistory.search_time.isnot(None)
            ).all()

            if recorded_times:
                # Each row is a one-element tuple; avoid shadowing the
                # `time` module (the original generator variable did).
                avg_time = sum(t for (t,) in recorded_times) / len(recorded_times)
            else:
                avg_time = 0

            # Ten most frequent query strings.
            from sqlalchemy import func
            popular_queries = session.query(
                SearchHistory.query,
                func.count(SearchHistory.id).label('count')
            ).group_by(SearchHistory.query).order_by(
                func.count(SearchHistory.id).desc()
            ).limit(10).all()

            session.close()

            return {
                "total_searches": total_searches,
                "average_search_time": avg_time,
                "popular_queries": [
                    {"query": query, "count": count}
                    for query, count in popular_queries
                ]
            }

        except Exception as e:
            logger.error(f"获取检索统计信息失败: {e}")
            if session is not None:
                session.close()
            return {}

class Reranker:
    """Placeholder re-ranking component.

    Holds the slot where a local rerank model would live; no model is
    loaded yet, and inference defaults to the CPU.
    """

    def __init__(self):
        self.model = None  # a local rerank model could be loaded here later
        self.device = "cpu"

# Module-level singleton retriever shared by importers of this module.
retriever = Retriever()
