import faiss
import numpy as np
from typing import List, Dict, Optional
import sqlite3
import os
import time
from enum import Enum
from rank_bm25 import BM25Okapi
from config import Config, SearchStrategy
from utils.logger import logger
import pickle

class HybridRetriever:
    def __init__(self,
                 vector_dim: int = Config.VECTOR_DIM,
                 strategy: SearchStrategy = Config.DEFAULT_SEARCH_STRATEGY,
                 min_vector_score: float = Config.MIN_VECTOR_SCORE):
        """Hybrid retriever combining FAISS vector search with BM25.

        Args:
            vector_dim: dimensionality of the embedding vectors.
            strategy: retrieval strategy (vector / cascade / rerank).
            min_vector_score: minimum vector-similarity score used to
                filter low-relevance results.
        """
        self.vector_dim = vector_dim
        self.strategy = strategy
        self.min_vector_score = min_vector_score

        # Inner-product FAISS index; documents are L2-normalised before
        # insertion, so inner product acts as cosine similarity.
        self.index = faiss.IndexFlatIP(vector_dim)

        # BM25 index is created when documents are added (or lazily rebuilt).
        self.bm25 = None

        # SQLite store holding the document contents.
        self.db_dir = Config.DB_DIR
        os.makedirs(self.db_dir, exist_ok=True)
        self.db_path = os.path.join(self.db_dir, Config.DB_NAME)
        self._init_db()
        
    def _init_db(self):
        """Create the SQLite ``documents`` table, migrating old schemas.

        Ensures the table exists and, for databases created before the
        ``doc_type`` column was introduced, adds that column in place.

        Raises:
            Exception: re-raised after logging if database setup fails.
        """
        conn = None
        try:
            conn = sqlite3.connect(self.db_path)
            c = conn.cursor()

            c.execute('''CREATE TABLE IF NOT EXISTS documents
                         (chunk_index INTEGER PRIMARY KEY, 
                          content TEXT,
                          doc_type TEXT DEFAULT 'paragraph')''')

            # Legacy databases may predate the doc_type column; detect and add it.
            c.execute("PRAGMA table_info(documents)")
            columns = [column[1] for column in c.fetchall()]

            if 'doc_type' not in columns:
                logger.info("检测到旧版数据库，正在添加 doc_type 列")
                c.execute("ALTER TABLE documents ADD COLUMN doc_type TEXT DEFAULT 'paragraph'")
                logger.info("成功添加 doc_type 列")

            conn.commit()
        except Exception as e:
            logger.error(f"初始化数据库失败: {str(e)}", exc_info=True)
            raise
        finally:
            # BUGFIX: the original leaked the connection when an execute
            # raised; always close it.
            if conn is not None:
                conn.close()
        
    def add_documents(self, documents: List[str], qa_pairs: List[str],
                      embeddings: List[List[float]],
                      doc_types: Optional[List[str]] = None):
        """Replace the stored corpus with a new set of documents.

        Rebuilds the FAISS index, the SQLite content table and the BM25
        index from scratch; previously indexed content is discarded.

        Args:
            documents: texts that are vector-indexed (questions/paragraphs).
            qa_pairs: answer texts aligned with ``documents``; stored as the
                returned content for QA-type entries.
            embeddings: one embedding vector per document.
            doc_types: per-document type ('qa', 'paragraph_qa' or
                'paragraph'); defaults to all 'paragraph'.

        Raises:
            Exception: re-raised after logging on any failure.
        """
        conn = None
        try:
            # Rebuild the FAISS index; normalise so IP == cosine similarity.
            self.index = faiss.IndexFlatIP(self.vector_dim)
            embeddings_np = np.array(embeddings).astype('float32')
            faiss.normalize_L2(embeddings_np)
            self.index.add(embeddings_np)

            conn = sqlite3.connect(self.db_path)
            c = conn.cursor()
            c.execute("DELETE FROM documents")

            # Decide what content to store per document.
            store_docs = []
            doc_types_to_store = doc_types if doc_types else ['paragraph'] * len(documents)

            for i, (doc, doc_type) in enumerate(zip(documents, doc_types_to_store)):
                if doc_type in ['qa', 'paragraph_qa']:
                    # QA entries: store the answer (what gets returned),
                    # falling back to the question text when no answer exists.
                    if i < len(qa_pairs) and qa_pairs[i].strip():
                        store_docs.append(qa_pairs[i])
                    else:
                        store_docs.append(doc)
                else:
                    # Plain paragraphs are stored as-is.
                    store_docs.append(doc)

            c.executemany(
                "INSERT INTO documents (chunk_index, content, doc_type) VALUES (?, ?, ?)",
                [(i, content, doc_type) for i, (content, doc_type) in enumerate(zip(store_docs, doc_types_to_store))]
            )
            conn.commit()

            # BM25 is built over the vector-indexed texts (questions /
            # paragraphs), not the stored answers, to match the FAISS index.
            self.bm25 = BM25Okapi([doc.split() for doc in documents])

        except Exception as e:
            logger.error(f"添加文档失败: {str(e)}", exc_info=True)
            raise
        finally:
            # BUGFIX: the original leaked the connection when anything after
            # connect() raised; always close it.
            if conn is not None:
                conn.close()
            
    def _get_all_documents(self) -> List[str]:
        """Return all stored document contents ordered by chunk_index.

        Returns:
            All contents in chunk order, or an empty list (after logging)
            on any database error.
        """
        conn = None
        try:
            conn = sqlite3.connect(self.db_path)
            c = conn.cursor()
            c.execute("SELECT content FROM documents ORDER BY chunk_index")
            rows = c.fetchall()
            return [row[0] for row in rows]
        except Exception as e:
            logger.error(f"获取文档内容失败: {str(e)}")
            return []
        finally:
            # BUGFIX: close the connection even when the query raises.
            if conn is not None:
                conn.close()
            
    def search(self, query: str, query_embedding: List[float], top_k: int) -> List[Dict]:
        """Dispatch retrieval to the configured strategy.

        Args:
            query: raw query text (used by BM25-based strategies).
            query_embedding: embedding vector of the query.
            top_k: maximum number of results to return.

        Returns:
            A list of result dicts, or an empty list on failure.
        """
        try:
            if self.strategy == SearchStrategy.VECTOR:
                return self._vector_search(query_embedding, top_k)
            if self.strategy == SearchStrategy.CASCADE:
                return self._cascade_search(query, query_embedding, top_k)
            # Any other strategy value falls through to rerank.
            return self._rerank_search(query, query_embedding, top_k)
        except Exception as e:
            logger.error(f"检索失败: {str(e)}")
            return []
            
    def _vector_search(self, query_embedding: List[float], top_k: int) -> List[Dict]:
        """Pure vector retrieval with per-doc-type score thresholds.

        Recalls an enlarged candidate set from FAISS, then filters each hit
        against a threshold chosen by its document type (QA-style entries
        use a stricter threshold than plain paragraphs).

        Returns:
            Up to ``top_k`` dicts with chunk_index, score and doc_type;
            empty list on failure.
        """
        try:
            query_np = np.array([query_embedding], dtype=np.float32)
            faiss.normalize_L2(query_np)

            # Over-recall so type-based filtering still leaves enough hits.
            search_k = min(top_k * 3, self.index.ntotal)
            vector_scores, indices = self.index.search(query_np, search_k)
            logger.info(f"向量召回 - 索引: {indices[0][:top_k]}, 分数: {vector_scores[0][:top_k]}")

            # One doc_type per candidate, aligned with indices[0].
            doc_types = self._get_doc_types_by_indices(indices[0].tolist())

            results = []
            for idx, score, doc_type in zip(indices[0], vector_scores[0], doc_types):
                # QA pairs and paragraph-derived questions require a higher
                # similarity than plain paragraphs.
                if doc_type in ['qa', 'paragraph_qa']:
                    threshold = Config.MIN_VECTOR_SCORE_QA
                else:
                    threshold = Config.MIN_VECTOR_SCORE_PARAGRAPH

                if score >= threshold:
                    results.append({
                        'chunk_index': int(idx),
                        'score': float(score),
                        'doc_type': doc_type
                    })

                # Stop as soon as we have enough results.
                if len(results) >= top_k:
                    break

            logger.info(f"向量检索过滤后 - 结果数: {len(results)}, QA结果: {sum(1 for r in results if r['doc_type'] == 'qa')}, 段落结果: {sum(1 for r in results if r['doc_type'] == 'paragraph')}")
            return results[:top_k]

        except Exception as e:
            logger.error(f"向量检索失败: {str(e)}")
            return []
            
    def _cascade_search(self, query: str, query_embedding: List[float], top_k: int) -> List[Dict]:
        """Cascade retrieval: BM25 first, vector search only as fallback.

        BM25 hits above ``Config.BM25_THRESHOLD`` are returned directly;
        only when BM25 yields nothing does the method fall back to vector
        search filtered by ``self.min_vector_score``.

        Returns:
            List of dicts with chunk_index and score; empty list on failure.
        """
        try:
            results = []

            # Lazily rebuild BM25 from stored contents if it is missing
            # (e.g. right after load()).
            if self.bm25 is None:
                documents = self._get_all_documents()
                self.bm25 = BM25Okapi([doc.split() for doc in documents])

            bm25_scores = self.bm25.get_scores(query.split())

            # Top-k BM25 candidates, best first.
            bm25_indices = np.argsort(bm25_scores)[-top_k:][::-1]

            bm25_threshold = Config.BM25_THRESHOLD
            for idx in bm25_indices:
                score = float(bm25_scores[idx])
                if score >= bm25_threshold:  # raw BM25 score threshold
                    results.append({
                        "chunk_index": int(idx),
                        "score": score
                    })

            # Vector fallback only when BM25 found nothing.
            if not results:
                # BUGFIX: normalise the query like the other strategies do.
                # The corpus vectors are L2-normalised in add_documents(),
                # so the inner-product scores (and the min_vector_score
                # threshold) are only meaningful for a unit-length query.
                query_np = np.array([query_embedding], dtype=np.float32)
                faiss.normalize_L2(query_np)
                vector_scores, indices = self.index.search(query_np, top_k)

                for score, idx in zip(vector_scores[0], indices[0]):
                    if score >= self.min_vector_score:  # vector threshold
                        results.append({
                            "chunk_index": int(idx),
                            "score": float(score)
                        })

            return results

        except Exception as e:
            logger.error(f"级联检索失败: {str(e)}")
            return []
        
    def _rerank_search(self, query: str, query_embedding: List[float], top_k: int) -> List[Dict]:
        """Rerank retrieval: vector recall followed by BM25 reranking.

        Recalls ``top_k * Config.RERANK_EXPAND_FACTOR`` candidates by vector
        similarity, reranks them with BM25 over the query terms and keeps
        those above ``Config.BM25_THRESHOLD``.

        Returns:
            Up to ``top_k`` dicts with chunk_index, score (BM25),
            vector_score and bm25_score; empty list on failure.
        """
        try:
            # Normalise the query vector (corpus vectors are normalised too).
            query_np = np.array([query_embedding], dtype=np.float32)
            faiss.normalize_L2(query_np)

            # Vector recall of an expanded candidate set.
            vector_scores, indices = self.index.search(
                query_np,
                top_k * Config.RERANK_EXPAND_FACTOR
            )
            logger.info(f"向量召回 - 索引: {indices[0]}, 分数: {vector_scores[0]}")

            # BUGFIX: keep candidates aligned with their indices. FAISS pads
            # with -1 when it has fewer results than requested, and rows can
            # be absent from the DB; the old code zipped the raw index array
            # against a possibly shorter document list, silently pairing
            # BM25 scores with the wrong chunk indices.
            aligned = []  # (chunk_index, vector_score, content)
            for idx, v_score in zip(indices[0], vector_scores[0]):
                if idx < 0:
                    continue  # FAISS padding entry, no real result
                docs = self.get_documents_by_indices([int(idx)])
                if docs:
                    aligned.append((int(idx), float(v_score), docs[0]))

            # BUGFIX: BM25Okapi cannot be built over an empty corpus
            # (division by zero in rank_bm25).
            if not aligned:
                return []

            # BM25 rerank over the candidate contents.
            bm25 = BM25Okapi([doc.split() for _, _, doc in aligned])
            bm25_scores = bm25.get_scores(query.split())
            logger.info(f"BM25重排序分数: {bm25_scores}")

            results = []
            for (idx, v_score, _), bm_score in zip(aligned, bm25_scores):
                if bm_score >= Config.BM25_THRESHOLD:  # raw BM25 threshold
                    results.append({
                        'chunk_index': idx,
                        'score': float(bm_score),   # rank primarily by BM25
                        'vector_score': v_score,    # kept for reference
                        'bm25_score': float(bm_score)
                    })

            # Best BM25 score first.
            results.sort(key=lambda x: x['score'], reverse=True)
            results = results[:top_k]

            logger.info(f"重排序后结果:")
            for r in results:
                logger.info(f"索引: {r['chunk_index']}, BM25分数: {r['score']:.4f}, "
                          f"向量分数: {r['vector_score']:.4f}")

            return results

        except Exception as e:
            logger.error(f"重排序检索失败: {str(e)}")
            return []
    
    def save(self, path: str):
        """Persist the FAISS index, BM25 index and document DB under *path*.

        Args:
            path: target directory; created if it does not exist.

        Raises:
            Exception: re-raised after logging on any failure.
        """
        try:
            os.makedirs(path, exist_ok=True)

            # FAISS vector index.
            faiss.write_index(self.index, os.path.join(path, Config.FAISS_INDEX_FILE))

            # BM25 index, only when one has been built.
            if self.bm25 is not None:
                with open(os.path.join(path, Config.BM25_INDEX_FILE), 'wb') as fh:
                    pickle.dump(self.bm25, fh)

            # Copy the SQLite DB unless it already lives at the target path.
            target_db = os.path.join(path, "documents.db")
            if os.path.abspath(self.db_path) != os.path.abspath(target_db):
                import shutil
                shutil.copy2(self.db_path, target_db)

        except Exception as e:
            logger.error(f"保存索引失败: {str(e)}", exc_info=True)
            raise
    
    def load(self, path: str):
        """Restore the indexes previously written by ``save``.

        Args:
            path: directory containing the saved index files.

        Raises:
            Exception: re-raised after logging on any failure.
        """
        try:
            # FAISS vector index.
            self.index = faiss.read_index(os.path.join(path, Config.FAISS_INDEX_FILE))

            # BM25 index, when present. NOTE(review): pickle.load is only
            # acceptable because the file is produced by save(); never load
            # pickles from untrusted sources.
            bm25_path = os.path.join(path, Config.BM25_INDEX_FILE)
            if os.path.exists(bm25_path):
                with open(bm25_path, 'rb') as fh:
                    self.bm25 = pickle.load(fh)

            # Point subsequent DB queries at the saved database.
            self.db_path = os.path.join(path, "documents.db")

        except Exception as e:
            logger.error(f"加载索引失败: {str(e)}", exc_info=True)
            raise

    def get_documents_by_indices(self, indices: List[int]) -> List[str]:
        """Fetch document contents for the given chunk indices.

        Results follow the order of ``indices``; indices with no matching
        row are silently skipped, so the returned list may be shorter than
        the input.

        Args:
            indices: chunk indices to look up.

        Returns:
            Matching contents, or an empty list on error / empty input.
        """
        if not indices:
            return []

        conn = None
        try:
            # Parameterised IN clause — one query covers all requested rows.
            placeholders = ','.join('?' * len(indices))
            sql = f"SELECT chunk_index, content FROM documents WHERE chunk_index IN ({placeholders})"

            conn = sqlite3.connect(self.db_path)
            c = conn.cursor()
            c.execute(sql, indices)
            doc_map = {row[0]: row[1] for row in c.fetchall()}

            # Re-emit in the caller's requested order, dropping misses.
            return [doc_map[idx] for idx in indices if idx in doc_map]

        except Exception as e:
            logger.error(f"获取文档内容失败: {str(e)}")
            return []
        finally:
            # BUGFIX: close the connection even when the query raises.
            if conn is not None:
                conn.close()
    
    def _get_doc_types_by_indices(self, indices: List[int]) -> List[str]:
        """Fetch the doc_type for each given chunk index.

        Returns exactly one entry per requested index, in the same order;
        unknown indices default to 'paragraph' so the result always stays
        aligned with ``indices``. On error, a list of 'paragraph' of the
        same length is returned.
        """
        if not indices:
            return []

        conn = None
        try:
            placeholders = ','.join('?' * len(indices))
            sql = f"SELECT chunk_index, doc_type FROM documents WHERE chunk_index IN ({placeholders})"

            conn = sqlite3.connect(self.db_path)
            c = conn.cursor()
            c.execute(sql, indices)
            doc_type_map = {row[0]: row[1] for row in c.fetchall()}

            # Preserve alignment with the requested indices.
            return [doc_type_map.get(idx, 'paragraph') for idx in indices]

        except Exception as e:
            logger.error(f"获取文档类型失败: {str(e)}")
            return ['paragraph'] * len(indices)
        finally:
            # BUGFIX: close the connection even when the query raises.
            if conn is not None:
                conn.close()