from typing import List, Dict, Any, Optional
import numpy as np
import faiss
import os
import pickle
from ..embeddings.base import BaseEmbedding
from .base import BaseVectorStore

class FAISSStore(BaseVectorStore):
    """FAISS-backed vector store with on-disk persistence.

    Texts and their metadata are kept in parallel Python lists; the matching
    embeddings live in a FAISS inner-product index. All vectors are
    L2-normalized before being added/searched, so the inner-product scores
    returned by searches are cosine similarities.

    Persistence layout:
        <index_path>       -- the serialized FAISS index
        <index_path>.data  -- pickled dict {'texts': [...], 'metadata': [...]}
    """

    def __init__(self, embedding_model: BaseEmbedding, index_path: str = "data/vector_store/faiss.index") -> None:
        """Initialize the FAISS vector store.

        Args:
            embedding_model: Embedding model instance used to encode texts;
                its ``dimension`` attribute fixes the index dimensionality.
            index_path: Path of the FAISS index file. If it already exists,
                the index and its companion ``.data`` file are loaded;
                otherwise a fresh inner-product index is created.
        """
        self.embedding_model = embedding_model
        self.index_path = index_path
        self.dimension = embedding_model.dimension  # index dimension comes from the embedding model

        # Parallel lists: texts[i] / metadata[i] correspond to vector i in the index.
        self.texts: List[str] = []
        self.metadata: List[Dict[str, Any]] = []

        # Ensure the target directory exists. Guard against a bare filename:
        # os.path.dirname("faiss.index") == "" and os.makedirs("") raises
        # FileNotFoundError.
        index_dir = os.path.dirname(index_path)
        if index_dir:
            os.makedirs(index_dir, exist_ok=True)

        if os.path.exists(index_path):
            print(f"加载已有索引: {index_path}")
            self.index = faiss.read_index(index_path)
            self._load_stored_data()
        else:
            print(f"创建新索引: {index_path}")
            # Inner-product index; combined with L2 normalization this yields
            # cosine similarity.
            self.index = faiss.IndexFlatIP(self.dimension)

    def similarity_search(self, query: str, k: int = 4) -> List[Dict[str, Any]]:
        """Return up to *k* stored documents most similar to *query*.

        Args:
            query: Query text to embed and search with.
            k: Maximum number of results; clamped to the index size.

        Returns:
            A list of dicts with keys ``content``, ``metadata`` and ``score``
            (cosine similarity, higher is better), or ``[]`` when the store
            is empty.

        Raises:
            Re-raises any underlying error after printing diagnostics.
        """
        try:
            print("\n=== 开始向量搜索 ===")
            print(f"查询文本: {query}")
            print(f"请求返回数量: {k}")

            if self.index.ntotal == 0:
                print("警告: 向量存储为空")
                return []

            # Dump current store state for debugging.
            print(f"当前索引中的向量数量: {self.index.ntotal}")
            print(f"文本数量: {len(self.texts)}")
            print(f"元数据数量: {len(self.metadata)}")

            # Embed the query and coerce to the (1, dim) float32 shape FAISS expects.
            print("正在生成查询向量...")
            query_embedding = np.array(self.embedding_model.encode(query))
            print(f"原始查询向量维度: {query_embedding.shape}")

            if query_embedding.ndim == 1:
                query_embedding = query_embedding.reshape(1, -1)
            query_embedding = query_embedding.astype(np.float32)
            print(f"处理后查询向量维度: {query_embedding.shape}")

            # Normalize in place so the inner product equals cosine similarity.
            print("正在归一化查询向量...")
            faiss.normalize_L2(query_embedding)

            # Search; never ask for more neighbors than the index contains.
            print("开始搜索最近邻...")
            k = min(k, self.index.ntotal)
            print(f"实际搜索数量: {k}")
            scores, indices = self.index.search(query_embedding, k)
            print(f"搜索结果 - scores: {scores.shape}, indices: {indices.shape}")

            # Map FAISS row ids back to stored texts/metadata.
            results = []
            print("\n处理搜索结果...")
            for i, (score, idx) in enumerate(zip(scores[0], indices[0])):
                print(f"处理第 {i+1} 个结果:")
                print(f"  - 索引: {idx}")
                print(f"  - 分数: {score}")

                # FAISS pads missing neighbors with -1; also guard against an
                # index/texts length mismatch from a partial load.
                if 0 <= idx < len(self.texts):
                    try:
                        result = {
                            "content": self.texts[idx],
                            "metadata": self.metadata[idx],
                            "score": float(score)
                        }
                        print(f"  - 内容长度: {len(result['content'])}")
                        print(f"  - 元数据: {result['metadata']}")
                        results.append(result)
                    except Exception as e:
                        print(f"  - 处理结果时出错: {str(e)}")

            print(f"\n找到 {len(results)} 个相关文档")
            return results

        except Exception as e:
            # Print diagnostics, then propagate so callers can handle the failure.
            print("\n=== 向量搜索出错 ===")
            print(f"错误类型: {type(e).__name__}")
            print(f"错误信息: {str(e)}")
            if 'query_embedding' in locals():
                print(f"查询向量维度: {query_embedding.shape}")
            print(f"索引维度: {self.dimension}")
            print(f"索引类型: {type(self.index).__name__}")
            raise

    def add_texts(self, texts: List[str], metadata: Optional[List[Dict[str, Any]]] = None) -> None:
        """Embed *texts*, add them to the index and persist the store.

        Args:
            texts: Documents to index. A no-op when empty.
            metadata: Optional per-text metadata; defaults to empty dicts.

        Raises:
            ValueError: If ``metadata`` is given but its length differs
                from ``len(texts)``.
        """
        if not texts:
            return

        try:
            # One metadata dict per text; default to fresh empty dicts.
            if metadata is None:
                metadata = [{} for _ in texts]
            elif len(metadata) != len(texts):
                raise ValueError("metadata长度必须与texts长度相同")

            # Embed and coerce to the (n, dim) float32 layout FAISS expects.
            embeddings = np.array(self.embedding_model.encode(texts))
            if embeddings.ndim == 1:
                embeddings = embeddings.reshape(1, -1)
            embeddings = embeddings.astype(np.float32)

            # Normalize in place so inner-product search returns cosine scores.
            faiss.normalize_L2(embeddings)

            print(f"添加向量数量: {len(texts)}")
            print(f"向量维度: {embeddings.shape}")

            self.index.add(embeddings)

            # Keep the side lists aligned with the index row order.
            self.texts.extend(texts)
            self.metadata.extend(metadata)

            # Persist immediately so a crash cannot desync index and data files.
            self.save()

            print(f"成功添加文档，当前总数: {len(self.texts)}")

        except Exception as e:
            print(f"添加文本时出错: {str(e)}")
            raise

    def save(self) -> None:
        """Persist the FAISS index and the texts/metadata sidecar to disk.

        Raises:
            Re-raises any I/O or serialization error after printing it.
        """
        try:
            faiss.write_index(self.index, self.index_path)

            # Texts/metadata go into a pickle next to the index file.
            data_path = f"{self.index_path}.data"
            with open(data_path, 'wb') as f:
                pickle.dump({
                    'texts': self.texts,
                    'metadata': self.metadata
                }, f)

            print(f"成功保存索引到: {self.index_path}")
            print(f"成功保存数据到: {data_path}")

        except Exception as e:
            print(f"保存向量存储时出错: {str(e)}")
            raise

    def _load_stored_data(self) -> None:
        """Load the pickled texts/metadata sidecar, if present.

        A missing sidecar only prints a warning (the index then has vectors
        with no retrievable texts); a corrupt sidecar re-raises.
        """
        try:
            data_path = f"{self.index_path}.data"
            if os.path.exists(data_path):
                # SECURITY: pickle.load executes arbitrary code from the file;
                # only load sidecars written by this application's save().
                with open(data_path, 'rb') as f:
                    data = pickle.load(f)
                    self.texts = data.get('texts', [])
                    self.metadata = data.get('metadata', [])
                print(f"成功加载数据，文档数量: {len(self.texts)}")
            else:
                print(f"警告: 未找到数据文件 {data_path}")

        except Exception as e:
            print(f"加载存储数据时出错: {str(e)}")
            raise

    @property
    def total_docs(self) -> int:
        """Total number of stored documents."""
        return len(self.texts)
