import os
import yaml
import faiss
import numpy as np
import pickle
from typing import List, Dict, Any

class VectorStore:
    """FAISS-backed vector store that pairs document payloads with L2-normalized embeddings.

    Embeddings are produced by an injected model (anything exposing `.encode`),
    indexed with `faiss.IndexFlatL2` after L2 normalization, and persisted to
    disk alongside the raw documents via pickle metadata.
    """

    def __init__(self, embedding_model, config_path="config/config.yaml"):
        """Initialize the store from a YAML config.

        Args:
            embedding_model: object with an `encode(text_or_texts)` method.
            config_path: YAML file expected to provide
                `vector_store.index_path` and `retrieval.top_k`.
        """
        # Load configuration (read as UTF-8 so non-ASCII config values survive).
        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        self.embedding_model = embedding_model
        self.index = None       # FAISS index; created lazily by add_documents()/load()
        self.documents = []     # document objects, row-aligned with the FAISS index
        self.embeddings = []    # raw (un-normalized) embedding rows, kept for persistence
        self.index_path = self.config['vector_store']['index_path']
        self.metadata_path = os.path.join(os.path.dirname(self.index_path), "metadata.pkl")

        # Make sure the storage directory exists before any save() call.
        os.makedirs(os.path.dirname(self.index_path), exist_ok=True)

    def _get_embedding(self, text: str):
        """Return the embedding for a single text via the injected model."""
        return self.embedding_model.encode(text)

    def add_documents(self, documents) -> int:
        """Embed `documents`, add them to the FAISS index, and return the count added.

        Each document is expected to expose a `.page_content` attribute
        (LangChain-style). The index is created on first use with the
        dimensionality observed from the actual embeddings.
        """
        # Guard the empty batch: encoding [] would yield an array with no
        # second dimension and crash on `shape[1]` below.
        if not documents:
            return 0

        print(f"Adding {len(documents)} documents to vector store...")

        # Keep the documents row-aligned with the vectors added to the index.
        self.documents.extend(documents)

        texts = [doc.page_content for doc in documents]
        new_embeddings = self.embedding_model.encode(texts)

        # Coerce to a 2-D numpy array (n_samples, n_features).
        if not isinstance(new_embeddings, np.ndarray):
            new_embeddings = np.array(new_embeddings)

        # Print the embedding shape for debugging.
        print(f"Embeddings shape: {new_embeddings.shape}")

        if new_embeddings.ndim == 1:
            # A single sample comes back 1-D; promote it to a one-row matrix.
            new_embeddings = new_embeddings.reshape(1, -1)

        # Store the raw rows so save() can persist them with the documents.
        self.embeddings.extend(new_embeddings)

        # Create the index lazily, using the dimension of the real data.
        dimension = new_embeddings.shape[1]
        if self.index is None:
            self.index = faiss.IndexFlatL2(dimension)

        # FAISS requires float32; normalize_L2 mutates in place, so apply it
        # to the astype() copy, leaving self.embeddings un-normalized.
        new_embeddings_float32 = new_embeddings.astype('float32')
        faiss.normalize_L2(new_embeddings_float32)

        self.index.add(new_embeddings_float32)

        print(f"Added {len(documents)} documents. Total: {len(self.documents)}")
        return len(documents)

    def save(self):
        """Persist the FAISS index and the document/embedding metadata to disk."""
        if self.index is not None:
            faiss.write_index(self.index, self.index_path)

            # Documents and raw embeddings are pickled next to the index so
            # load() can restore a fully usable store.
            with open(self.metadata_path, 'wb') as f:
                pickle.dump({
                    'documents': self.documents,
                    'embeddings': self.embeddings
                }, f)

            print(f"Vector store saved to {self.index_path}")
        else:
            print("Warning: No index to save.")

    def load(self) -> bool:
        """Load a previously saved index and metadata. Return True on success."""
        if not os.path.exists(self.index_path) or not os.path.exists(self.metadata_path):
            print("No existing index found.")
            return False

        try:
            self.index = faiss.read_index(self.index_path)

            # SECURITY NOTE: pickle.load executes arbitrary code on malicious
            # input — only load metadata files this application wrote itself;
            # never point metadata_path at untrusted data.
            with open(self.metadata_path, 'rb') as f:
                data = pickle.load(f)
                self.documents = data['documents']
                self.embeddings = data['embeddings']

            print(f"Loaded vector store with {len(self.documents)} documents")
            return True
        except Exception as e:
            # Best-effort load: report the failure and leave the store usable
            # (callers can fall back to rebuilding via add_documents()).
            print(f"Error loading vector store: {e}")
            return False

    def search(self, query: str, top_k: int = None):
        """Return up to `top_k` documents most similar to `query`.

        Args:
            query: search text, embedded with the same model as the documents.
            top_k: number of hits; defaults to `retrieval.top_k` from config.

        Returns:
            List of dicts with keys 'document' and 'score'.

        Raises:
            ValueError: if the index has not been built or loaded yet.
        """
        if self.index is None:
            raise ValueError("Index not initialized. Call load() or add_documents() first")

        if top_k is None:
            top_k = self.config['retrieval']['top_k']

        # Embed the query and coerce to a 2-D float32 array, mirroring the
        # pipeline used for documents in add_documents().
        query_embedding = self._get_embedding(query)
        if not isinstance(query_embedding, np.ndarray):
            query_embedding = np.array(query_embedding)
        if query_embedding.ndim == 1:
            query_embedding = query_embedding.reshape(1, -1)
        query_embedding = query_embedding.astype('float32')
        faiss.normalize_L2(query_embedding)

        distances, indices = self.index.search(query_embedding, top_k)

        results = []
        for i, idx in enumerate(indices[0]):
            # FAISS pads `indices` with -1 when the index holds fewer than
            # top_k vectors. The original `idx < len(...)` check accepted -1,
            # and Python's negative indexing then silently returned the LAST
            # document with a bogus score — so require idx >= 0 explicitly.
            if 0 <= idx < len(self.documents):
                results.append({
                    'document': self.documents[idx],
                    # Distance-to-score mapping on the squared L2 distance of
                    # unit vectors (range [0, 4]); true cosine similarity
                    # would be 1 - distance / 2.
                    'score': float(1 - distances[0][i])
                })

        return results