"""
FAISS索引模块
使用FAISS构建高效的向量检索索引
"""
import numpy as np
import faiss
import pickle
from pathlib import Path
from typing import List, Dict, Tuple
import logging

from config import FAISS_INDEX_DIR, EMBEDDING_DIM

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class FAISSIndexer:
    """FAISS vector index.

    Stores L2-normalized embeddings in an inner-product index, so the
    similarity scores returned by searches are cosine similarities.
    A bidirectional PMID <-> FAISS-row-id mapping is kept alongside the
    index and persisted with it.
    """

    def __init__(self, dimension: int = EMBEDDING_DIM, use_gpu: bool = False):
        """
        Initialize the FAISS index.

        Args:
            dimension: Embedding dimension.
            use_gpu: Move the index onto GPU 0 when a GPU is available.
        """
        self.dimension = dimension
        # Inner-product index; with L2-normalized vectors, IP == cosine similarity.
        self.index = faiss.IndexFlatIP(dimension)

        if use_gpu and faiss.get_num_gpus() > 0:
            res = faiss.StandardGpuResources()
            self.index = faiss.index_cpu_to_gpu(res, 0, self.index)
            logger.info("使用GPU加速")

        self.pmid_to_index = {}  # PMID -> FAISS row id
        self.index_to_pmid = {}  # FAISS row id -> PMID

    @staticmethod
    def _normalized_copy(vectors: np.ndarray) -> np.ndarray:
        """Return an L2-normalized, C-contiguous float32 copy of *vectors*.

        faiss.normalize_L2 modifies its argument in place and requires
        float32 data, so we always normalize a private copy instead of
        mutating (or crashing on) the caller's array.
        """
        out = np.array(vectors, dtype=np.float32, order="C", copy=True)
        faiss.normalize_L2(out)
        return out

    def add_vectors(self, vectors: np.ndarray, pmids: List[str]):
        """
        Add vectors to the index.

        Args:
            vectors: Matrix of shape (n, dimension); any float dtype accepted.
            pmids: PMIDs corresponding to the rows of *vectors*.

        Raises:
            ValueError: If *vectors* and *pmids* differ in length (a silent
                mismatch would corrupt the PMID <-> row-id maps).
        """
        if len(vectors) != len(pmids):
            raise ValueError(
                f"vectors ({len(vectors)}) and pmids ({len(pmids)}) length mismatch"
            )

        # Normalize a copy for cosine similarity; never mutate the input.
        normalized = self._normalized_copy(vectors)

        start_idx = self.index.ntotal
        for i, pmid in enumerate(pmids):
            idx = start_idx + i
            self.pmid_to_index[pmid] = idx
            self.index_to_pmid[idx] = pmid

        self.index.add(normalized)
        logger.info(f"已添加 {len(vectors)} 个向量到索引")

    def search(
        self,
        query_vector: np.ndarray,
        top_k: int = 10
    ) -> Tuple[List[int], List[float]]:
        """
        Search for the most similar vectors.

        Args:
            query_vector: Query vector of length *dimension* (1-D or (1, d)).
            top_k: Number of results to return.

        Returns:
            (row-id list, similarity-score list), sorted by descending
            similarity. The ``-1`` padding entries FAISS emits when top_k
            exceeds the index size are filtered out, so both lists may be
            shorter than top_k.
        """
        # Copy before normalizing: reshape returns a view, so normalizing
        # in place would write through to the caller's array.
        query = self._normalized_copy(query_vector).reshape(1, -1)

        distances, indices = self.index.search(query, top_k)

        # Drop FAISS's -1 padding (returned when fewer than top_k vectors exist).
        pairs = [
            (int(idx), float(score))
            for idx, score in zip(indices[0], distances[0])
            if idx != -1
        ]
        result_ids = [idx for idx, _ in pairs]
        result_scores = [score for _, score in pairs]
        return result_ids, result_scores

    def search_by_pmid(
        self,
        query_pmid: str,
        top_k: int = 10
    ) -> List[Dict]:
        """
        Find similar papers given a PMID already in the index.

        Args:
            query_pmid: Query PMID.
            top_k: Number of results to return.

        Returns:
            List of similar papers as ``{"pmid": ..., "score": ...}`` dicts,
            excluding the query paper itself. Empty list if the PMID is
            unknown.
        """
        if query_pmid not in self.pmid_to_index:
            logger.warning(f"PMID {query_pmid} 不在索引中")
            return []

        idx = self.pmid_to_index[query_pmid]

        # Recover the stored (already normalized) vector from the flat index.
        # NOTE(review): reconstruct() is not supported on GPU-resident
        # indexes — confirm if use_gpu=True is ever combined with this path.
        vector = self.index.reconstruct(idx)

        # +1 because the query vector itself will be among the results.
        indices, scores = self.search(vector, top_k + 1)

        results = []
        for i, score in zip(indices, scores):
            pmid = self.index_to_pmid.get(i)
            if pmid and pmid != query_pmid:  # exclude the query itself
                results.append({"pmid": pmid, "score": score})

        return results[:top_k]

    def search_by_vector(
        self,
        query_vector: np.ndarray,
        top_k: int = 10
    ) -> List[Dict]:
        """
        Find similar papers given a query vector.

        Args:
            query_vector: Query vector.
            top_k: Number of results to return.

        Returns:
            List of ``{"pmid": ..., "score": ...}`` dicts.
        """
        indices, scores = self.search(query_vector, top_k)

        results = []
        for i, score in zip(indices, scores):
            pmid = self.index_to_pmid.get(i)
            if pmid:
                results.append({"pmid": pmid, "score": score})

        return results

    def save(self, save_path: str):
        """
        Save the index to disk.

        Writes two files: ``<save_path>.faiss`` (the FAISS index) and
        ``<save_path>.pkl`` (the PMID mappings and dimension).

        Args:
            save_path: Path prefix (extensions are appended).
        """
        Path(save_path).parent.mkdir(parents=True, exist_ok=True)

        # FAISS index and Python-side metadata are persisted separately.
        faiss.write_index(self.index, f"{save_path}.faiss")

        with open(f"{save_path}.pkl", "wb") as f:
            pickle.dump({
                "pmid_to_index": self.pmid_to_index,
                "index_to_pmid": self.index_to_pmid,
                "dimension": self.dimension
            }, f)

        logger.info(f"索引已保存到: {save_path}")

    def load(self, load_path: str):
        """
        Load an index previously written by :meth:`save`.

        Args:
            load_path: Path prefix used at save time.
        """
        self.index = faiss.read_index(f"{load_path}.faiss")

        # SECURITY NOTE: pickle.load executes arbitrary code — only load
        # files this application wrote itself, never untrusted input.
        with open(f"{load_path}.pkl", "rb") as f:
            data = pickle.load(f)
            self.pmid_to_index = data["pmid_to_index"]
            self.index_to_pmid = data["index_to_pmid"]
            self.dimension = data["dimension"]

        logger.info(f"索引已从 {load_path} 加载")
        logger.info(f"索引中包含 {self.index.ntotal} 个向量")

    def get_stats(self) -> Dict:
        """Return index statistics: vector count, dimension, PMID count."""
        return {
            "total_vectors": self.index.ntotal,
            "dimension": self.dimension,
            "pmid_count": len(self.pmid_to_index)
        }


if __name__ == "__main__":
    # Smoke test: index a handful of random vectors, then run one query.
    dim = 768
    indexer = FAISSIndexer(dimension=dim)

    sample_vectors = np.random.rand(10, dim).astype('float32')
    sample_pmids = [f"PMID{n}" for n in range(10)]
    indexer.add_vectors(sample_vectors, sample_pmids)

    query_vec = np.random.rand(1, dim).astype('float32')
    hits = indexer.search_by_vector(query_vec, top_k=3)

    print("搜索结果:")
    for hit in hits:
        print(f"PMID: {hit['pmid']}, Score: {hit['score']:.4f}")

    print(f"\n索引统计: {indexer.get_stats()}")
