"""
向量处理模块
实现文本向量化和向量存储功能
"""

import os
from typing import List, Dict, Optional, Union
import logging
from pathlib import Path
import numpy as np
import torch
from sentence_transformers import SentenceTransformer
import faiss

from .document import DocumentChunk

logger = logging.getLogger(__name__)

class Embedding:
    """Text vectorization and FAISS-backed vector storage.

    Wraps a SentenceTransformer model for batched text encoding and an
    exact (brute-force) L2 FAISS index for similarity search. Call
    ``initialize()`` before adding documents or searching.
    """

    def __init__(self, config: Optional[Dict] = None):
        """Read settings from *config* (all keys optional).

        Supported keys: ``model_name``, ``device``, ``batch_size``,
        ``vector_store_path``.
        """
        self.config = config or {}
        self.model_name = self.config.get("model_name", "BAAI/bge-small-zh-v1.5")
        # Prefer GPU when available unless the caller pins a device.
        self.device = self.config.get("device", "cuda" if torch.cuda.is_available() else "cpu")
        self.batch_size = self.config.get("batch_size", 32)
        self.vector_store_path = self.config.get("vector_store_path", "vector_store")

        self.model = None           # SentenceTransformer, set by initialize()
        self.index = None           # faiss.IndexFlatL2, set by initialize()
        self.document_lookup = {}   # FAISS row id (int) -> DocumentChunk

    def initialize(self):
        """Load the embedding model and create an empty FAISS index.

        Raises:
            Exception: re-raises any model/index setup failure after logging it.
        """
        try:
            self.model = SentenceTransformer(self.model_name)
            self.model.to(self.device)

            # Ensure the persistence directory exists up front.
            os.makedirs(self.vector_store_path, exist_ok=True)

            # IndexFlatL2 performs exact L2-distance search; its dimension
            # must match the model's embedding size.
            self.index = faiss.IndexFlatL2(self.model.get_sentence_embedding_dimension())

            logger.info(f"向量模型初始化完成: {self.model_name}")

        except Exception as e:
            logger.error(f"向量模型初始化失败: {str(e)}")
            raise

    def _require_initialized(self):
        """Fail fast with a clear message when used before initialize()."""
        if self.model is None or self.index is None:
            raise RuntimeError("Embedding is not initialized; call initialize() first")

    def _batch_encode(self, texts: List[str]) -> np.ndarray:
        """Encode *texts* in batches of ``self.batch_size``.

        Returns:
            A contiguous 2-D float32 array of shape ``(len(texts), dim)``;
            an empty ``(0, 0)`` array when *texts* is empty (``np.vstack``
            on an empty list would otherwise raise ValueError).
        """
        if not texts:
            return np.empty((0, 0), dtype=np.float32)

        batches = []
        for start in range(0, len(texts), self.batch_size):
            batch = texts[start:start + self.batch_size]
            with torch.no_grad():  # inference only — no autograd graph needed
                batches.append(
                    self.model.encode(
                        batch,
                        convert_to_numpy=True,
                        show_progress_bar=False,
                    )
                )
        # FAISS requires contiguous float32 input; enforce it explicitly.
        return np.ascontiguousarray(np.vstack(batches), dtype=np.float32)

    def add_documents(self, chunks: List["DocumentChunk"]):
        """Embed *chunks* and append them to the index and lookup table."""
        if not chunks:
            return
        self._require_initialized()

        embeddings = self._batch_encode([chunk.text for chunk in chunks])
        self.index.add(embeddings)

        # Row ids continue from the current table size so they line up with
        # FAISS insertion order.
        start_idx = len(self.document_lookup)
        for offset, chunk in enumerate(chunks):
            self.document_lookup[start_idx + offset] = chunk

        logger.info(f"添加了 {len(chunks)} 个文档块到向量存储")

    def search(self, query: str, top_k: int = 3) -> List[Dict]:
        """Return up to *top_k* stored chunks most similar to *query*.

        Each result dict carries ``"chunk"``, ``"score"`` (1 / (1 + L2
        distance); higher is more similar) and ``"rank"`` (1-based).
        """
        self._require_initialized()

        query_vector = self._batch_encode([query])[0].reshape(1, -1)
        distances, indices = self.index.search(query_vector, top_k)

        results = []
        for rank, (distance, idx) in enumerate(zip(distances[0], indices[0]), start=1):
            if idx == -1:  # FAISS pads with -1 when fewer than top_k hits exist
                continue

            # FAISS returns np.int64 ids; normalize to int for the dict lookup.
            chunk = self.document_lookup.get(int(idx))
            if chunk:
                results.append({
                    "chunk": chunk,
                    # float() keeps the score a plain Python float (JSON-friendly).
                    "score": 1.0 / (1.0 + float(distance)),
                    "rank": rank,
                })

        return results

    def save(self, path: Optional[str] = None):
        """Persist the FAISS index and lookup table under *path*."""
        if path is None:
            path = self.vector_store_path
        # A caller-supplied path may not exist yet.
        os.makedirs(path, exist_ok=True)

        faiss.write_index(self.index, os.path.join(path, "index.faiss"))
        torch.save(self.document_lookup, os.path.join(path, "lookup.pt"))

        logger.info(f"向量存储已保存到: {path}")

    def load(self, path: Optional[str] = None):
        """Restore the FAISS index and lookup table from *path*.

        Missing files are skipped silently so a fresh directory loads as an
        empty store.
        """
        if path is None:
            path = self.vector_store_path

        index_path = os.path.join(path, "index.faiss")
        if os.path.exists(index_path):
            self.index = faiss.read_index(index_path)

        lookup_path = os.path.join(path, "lookup.pt")
        if os.path.exists(lookup_path):
            # weights_only=False: the lookup holds pickled DocumentChunk
            # objects, which PyTorch >= 2.6 refuses to unpickle by default.
            # Only load files this process wrote itself.
            self.document_lookup = torch.load(lookup_path, weights_only=False)

        logger.info(f"向量存储已加载: {path}")

    def clear(self):
        """Reset the FAISS index and drop all stored chunks."""
        if self.index is not None:
            self.index.reset()
        self.document_lookup.clear()
        logger.info("向量存储已清空")