"""向量化器 - 文档向量化和相似度计算工具"""

import asyncio
import hashlib
import numpy as np
from typing import List, Dict, Any, Optional, Tuple, Union
from dataclasses import dataclass
from loguru import logger

# 嵌入模型库
try:
    from sentence_transformers import SentenceTransformer
except ImportError as e:
    logger.warning(f"SentenceTransformer库导入失败: {e}")
    SentenceTransformer = None

try:
    import openai
except ImportError as e:
    logger.warning(f"OpenAI库导入失败: {e}")
    openai = None

try:
    from transformers import AutoTokenizer, AutoModel
    import torch
except ImportError as e:
    logger.warning(f"Transformers库导入失败: {e}")
    AutoTokenizer = None
    AutoModel = None
    torch = None


@dataclass
class VectorMetadata:
    """Metadata record describing one stored embedding vector.

    NOTE(review): defined here but not instantiated anywhere in this module;
    presumably constructed by downstream code — confirm against callers.
    """
    vector_id: str    # unique identifier of the vector in the store
    model_name: str   # name of the embedding model that produced the vector
    dimension: int    # dimensionality of the vector
    created_at: str   # creation timestamp (string-encoded; format not fixed here)
    text_hash: str    # hash of the source text (e.g. for dedup / change detection)
    chunk_id: str     # identifier of the originating document chunk
    similarity_threshold: float  # threshold associated with this vector's matching
    

class EmbeddingModel:
    """Abstract base class for text-embedding backends.

    Subclasses must implement :meth:`encode`; ``encode_batch`` and
    ``get_dimension`` are shared helpers built on top of it.
    """

    def __init__(self, model_name: str):
        self.model_name = model_name
        self.dimension = 0      # unknown until a concrete model is loaded
        self.model = None       # backend-specific model handle

    async def encode(self, texts: Union[str, List[str]]) -> np.ndarray:
        """Encode text(s) into vectors. Must be overridden by subclasses."""
        raise NotImplementedError

    async def encode_batch(self, texts: List[str], batch_size: int = 32) -> List[np.ndarray]:
        """Encode ``texts`` in slices of ``batch_size``.

        A 2-D ndarray result is flattened into per-row vectors so the return
        value is always a flat list with one entry per input text.
        """
        collected: List[np.ndarray] = []
        for start in range(0, len(texts), batch_size):
            encoded = await self.encode(texts[start:start + batch_size])
            if isinstance(encoded, np.ndarray) and encoded.ndim == 2:
                collected.extend(encoded)
            else:
                collected.append(encoded)
        return collected

    def get_dimension(self) -> int:
        """Return the embedding dimensionality (0 if no model is loaded)."""
        return self.dimension


class SentenceTransformerModel(EmbeddingModel):
    """Embedding backend built on the ``sentence_transformers`` package.

    Raises:
        ImportError: if the sentence_transformers library is not installed.
    """

    def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
        super().__init__(model_name)
        if SentenceTransformer is None:
            raise ImportError("SentenceTransformer库未安装")

        try:
            self.model = SentenceTransformer(model_name)
            # The loaded model reports its own output dimensionality.
            self.dimension = self.model.get_sentence_embedding_dimension()
            logger.info(f"SentenceTransformer模型加载成功: {model_name}, 维度: {self.dimension}")
        except Exception as e:
            logger.error(f"SentenceTransformer模型加载失败: {e}")
            raise

    async def encode(self, texts: Union[str, List[str]]) -> np.ndarray:
        """Encode text(s) into a 2-D array with one row per input text.

        Args:
            texts: a single string or a list of strings.

        Returns:
            np.ndarray of shape (len(texts), dimension).

        Raises:
            Exception: re-raised after logging if encoding fails.
        """
        try:
            if isinstance(texts, str):
                texts = [texts]

            # Run the blocking encode call in the default executor so the
            # event loop stays responsive. get_running_loop() is the correct,
            # non-deprecated call inside a coroutine (get_event_loop() is
            # deprecated in this context since Python 3.10).
            loop = asyncio.get_running_loop()
            vectors = await loop.run_in_executor(
                None, 
                self.model.encode, 
                texts
            )
            
            return np.array(vectors)
            
        except Exception as e:
            logger.error(f"SentenceTransformer编码失败: {e}")
            raise


class OpenAIEmbeddingModel(EmbeddingModel):
    """Embedding backend that calls the OpenAI embeddings API.

    NOTE(review): uses the legacy ``openai.Embedding.acreate`` interface
    (openai SDK < 1.0) — confirm the installed SDK version supports it.
    """

    # Known output dimensions per model name; unknown names fall back to 1536.
    _MODEL_DIMENSIONS = {
        "text-embedding-ada-002": 1536,
        "text-embedding-3-small": 1536,
        "text-embedding-3-large": 3072,
    }

    def __init__(self, model_name: str = "text-embedding-ada-002", api_key: Optional[str] = None):
        super().__init__(model_name)
        if openai is None:
            raise ImportError("OpenAI库未安装")

        # Only override the module-level key when one is supplied.
        if api_key:
            openai.api_key = api_key

        self.dimension = self._MODEL_DIMENSIONS.get(model_name, 1536)
        logger.info(f"OpenAI嵌入模型初始化: {model_name}, 维度: {self.dimension}")

    async def encode(self, texts: Union[str, List[str]]) -> np.ndarray:
        """Encode text(s) via the OpenAI embeddings endpoint.

        Returns an array with one embedding row per input text; re-raises
        API errors after logging.
        """
        try:
            texts = [texts] if isinstance(texts, str) else texts

            response = await openai.Embedding.acreate(
                model=self.model_name,
                input=texts
            )

            return np.array([row['embedding'] for row in response['data']])

        except Exception as e:
            logger.error(f"OpenAI编码失败: {e}")
            raise


class HuggingFaceModel(EmbeddingModel):
    """Embedding backend built directly on HuggingFace ``transformers``.

    Raises:
        ImportError: if transformers or torch is not installed.
    """

    def __init__(self, model_name: str = "sentence-transformers/all-MiniLM-L6-v2"):
        super().__init__(model_name)
        if AutoTokenizer is None or AutoModel is None or torch is None:
            raise ImportError("Transformers库未安装")

        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModel.from_pretrained(model_name)

            # Probe the hidden size with a dummy forward pass to discover
            # the embedding dimensionality.
            with torch.no_grad():
                dummy_input = self.tokenizer("test", return_tensors="pt", padding=True, truncation=True)
                dummy_output = self.model(**dummy_input)
                self.dimension = dummy_output.last_hidden_state.shape[-1]

            logger.info(f"HuggingFace模型加载成功: {model_name}, 维度: {self.dimension}")

        except Exception as e:
            logger.error(f"HuggingFace模型加载失败: {e}")
            raise

    async def encode(self, texts: Union[str, List[str]]) -> np.ndarray:
        """Encode text(s) into a 2-D array of shape (len(texts), hidden_size).

        Raises:
            Exception: re-raised after logging if encoding fails.
        """
        try:
            if isinstance(texts, str):
                texts = [texts]

            # Offload the blocking forward pass to the default executor.
            # get_running_loop() replaces the deprecated (inside coroutines,
            # since Python 3.10) get_event_loop().
            loop = asyncio.get_running_loop()
            vectors = await loop.run_in_executor(None, self._encode_sync, texts)
            
            return vectors
            
        except Exception as e:
            logger.error(f"HuggingFace编码失败: {e}")
            raise
    
    def _encode_sync(self, texts: List[str]) -> np.ndarray:
        """Synchronous helper: tokenize, run the model, pool one vector per text."""
        with torch.no_grad():
            inputs = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True, max_length=512)
            outputs = self.model(**inputs)
            
            # Prefer the model's pooler output when available; otherwise
            # mean-pool over the token dimension.
            if hasattr(outputs, 'pooler_output') and outputs.pooler_output is not None:
                vectors = outputs.pooler_output
            else:
                vectors = outputs.last_hidden_state.mean(dim=1)
            
            return vectors.cpu().numpy()


class VectorStore:
    """Abstract base for vector storage backends (add / search / delete)."""
    
    async def add_vectors(self, vectors: List[np.ndarray], 
                         metadatas: List[Dict[str, Any]]) -> List[str]:
        """Persist vectors with their parallel metadata dicts; return the assigned vector ids."""
        raise NotImplementedError
    
    async def search_similar(self, query_vector: np.ndarray, 
                           top_k: int = 10, 
                           threshold: float = 0.7) -> List[Tuple[str, float, Dict[str, Any]]]:
        """Return up to ``top_k`` (vector_id, similarity, metadata) tuples with similarity >= ``threshold``."""
        raise NotImplementedError
    
    async def delete_vectors(self, vector_ids: List[str]) -> bool:
        """Delete the given vector ids; return True on success."""
        raise NotImplementedError


class InMemoryVectorStore(VectorStore):
    """In-memory vector store, suited to tests and small-scale use."""

    def __init__(self):
        # Two parallel dicts keyed by vector id.
        self.vectors = {}    # vector_id -> np.ndarray
        self.metadatas = {}  # vector_id -> metadata dict

    async def add_vectors(self, vectors: List[np.ndarray],
                         metadatas: List[Dict[str, Any]]) -> List[str]:
        """Store each (vector, metadata) pair.

        The id comes from ``metadata['vector_id']`` when present, otherwise
        an md5 derived from the vector bytes.
        """
        try:
            vector_ids = []
            for vec, meta in zip(vectors, metadatas):
                vid = meta.get('vector_id') or self._generate_vector_id(vec)
                self.vectors[vid] = vec
                self.metadatas[vid] = meta
                vector_ids.append(vid)

            logger.debug(f"添加 {len(vector_ids)} 个向量到内存存储")
            return vector_ids

        except Exception as e:
            logger.error(f"添加向量失败: {e}")
            raise

    async def search_similar(self, query_vector: np.ndarray,
                           top_k: int = 10,
                           threshold: float = 0.7) -> List[Tuple[str, float, Dict[str, Any]]]:
        """Linear scan: cosine similarity against every stored vector.

        Returns up to ``top_k`` matches at or above ``threshold``, sorted by
        descending similarity; [] on error or when the store is empty.
        """
        try:
            if not self.vectors:
                return []

            scored = []
            for vid, stored in self.vectors.items():
                score = self._cosine_similarity(query_vector, stored)
                if score < threshold:
                    continue
                scored.append((vid, score, self.metadatas[vid]))

            # Highest similarity first.
            scored.sort(key=lambda item: item[1], reverse=True)
            return scored[:top_k]

        except Exception as e:
            logger.error(f"搜索相似向量失败: {e}")
            return []

    async def delete_vectors(self, vector_ids: List[str]) -> bool:
        """Remove the given ids; ids that are absent are silently skipped."""
        try:
            for vid in vector_ids:
                self.vectors.pop(vid, None)
                self.metadatas.pop(vid, None)

            logger.debug(f"删除 {len(vector_ids)} 个向量")
            return True

        except Exception as e:
            logger.error(f"删除向量失败: {e}")
            return False

    def _generate_vector_id(self, vector: np.ndarray) -> str:
        """Derive a deterministic id from the raw vector bytes (md5 hex digest)."""
        raw = str(vector.tobytes())
        return hashlib.md5(raw.encode()).hexdigest()

    def _cosine_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Cosine similarity of two vectors; zero-norm inputs yield 0.0."""
        try:
            # Work on 1-D views regardless of input rank.
            a = vec1.flatten() if vec1.ndim > 1 else vec1
            b = vec2.flatten() if vec2.ndim > 1 else vec2

            norm_a = np.linalg.norm(a)
            norm_b = np.linalg.norm(b)
            if norm_a == 0 or norm_b == 0:
                return 0.0

            return float(np.dot(a, b) / (norm_a * norm_b))

        except Exception as e:
            logger.error(f"计算余弦相似度失败: {e}")
            return 0.0


class DocumentVectorizer:
    """High-level facade: embeds document chunks and queries a vector store.

    Combines an ``EmbeddingModel`` (text -> vector) with a ``VectorStore``
    (persistence + similarity search).
    """

    def __init__(self,
                 embedding_model: "EmbeddingModel",
                 vector_store: "VectorStore",
                 batch_size: int = 32):
        """
        Initialize the document vectorizer.

        Args:
            embedding_model: backend used to encode text into vectors
            vector_store: backend used to persist and search vectors
            batch_size: number of texts encoded per batch
        """
        self.embedding_model = embedding_model
        self.vector_store = vector_store
        self.batch_size = batch_size

        logger.info(f"文档向量化器初始化完成: {embedding_model.model_name}")

    async def vectorize_chunks(self, chunks: List[Dict[str, Any]],
                              document_id: str) -> List[str]:
        """Embed and store document chunks.

        Each chunk dict must provide ``'content'`` (str) and ``'metadata'``
        (dict). Stored ids take the form ``"<document_id>_chunk_<i>"``.

        Returns:
            The list of stored vector ids ([] for empty input).

        Raises:
            Exception: re-raised after logging if encoding or storage fails.
        """
        try:
            if not chunks:
                return []

            # Extract the raw text to embed.
            texts = [chunk['content'] for chunk in chunks]

            vectors = await self.embedding_model.encode_batch(texts, self.batch_size)

            # Build one metadata record per chunk/vector pair.
            metadatas = []
            for i, (chunk, vector) in enumerate(zip(chunks, vectors)):
                # Content hash lets callers detect stale or duplicate text.
                text_hash = hashlib.md5(chunk['content'].encode()).hexdigest()

                metadata = {
                    'vector_id': f"{document_id}_chunk_{i}",
                    'document_id': document_id,
                    'chunk_id': chunk['metadata'].get('chunk_id', f'chunk_{i}'),
                    'model_name': self.embedding_model.model_name,
                    'dimension': self.embedding_model.get_dimension(),
                    'text_hash': text_hash,
                    'content': chunk['content'],
                    'chunk_metadata': chunk['metadata']
                }
                metadatas.append(metadata)

            vector_ids = await self.vector_store.add_vectors(vectors, metadatas)

            logger.info(f"文档 {document_id} 向量化完成: {len(vector_ids)} 个分块")
            return vector_ids

        except Exception as e:
            logger.error(f"文档向量化失败: {e}")
            raise

    async def search_similar_chunks(self, query: str,
                                   top_k: int = 10,
                                   threshold: float = 0.7) -> List[Dict[str, Any]]:
        """Embed ``query`` and return the most similar stored chunks.

        Returns:
            A list of dicts with vector_id / similarity / content /
            document_id / chunk_id / metadata keys; [] on any failure.
        """
        try:
            query_vector = await self.embedding_model.encode(query)
            if query_vector.ndim > 1:
                query_vector = query_vector[0]  # encode() returns a batch; take the first row

            results = await self.vector_store.search_similar(
                query_vector, top_k, threshold
            )

            # Flatten (id, score, metadata) tuples into result dicts.
            formatted_results = []
            for vector_id, similarity, metadata in results:
                formatted_results.append({
                    'vector_id': vector_id,
                    'similarity': similarity,
                    'content': metadata.get('content', ''),
                    'document_id': metadata.get('document_id', ''),
                    'chunk_id': metadata.get('chunk_id', ''),
                    'metadata': metadata
                })

            logger.debug(f"查询 '{query[:50]}...' 找到 {len(formatted_results)} 个相似分块")
            return formatted_results

        except Exception as e:
            logger.error(f"搜索相似分块失败: {e}")
            return []

    # Backward-compatible alias, consistent with the old API.
    async def search(self, query: str, top_k: int = 10, threshold: float = 0.7) -> List[Dict[str, Any]]:
        """Alias for :meth:`search_similar_chunks`, kept for legacy callers/tests."""
        try:
            return await self.search_similar_chunks(query, top_k, threshold)
        except Exception as e:
            logger.error(f"向量搜索失败: {e}")
            return []

    async def delete_document_vectors(self, document_id: str) -> bool:
        """Delete every stored vector belonging to ``document_id``.

        Only supported for stores exposing a ``vectors`` mapping (e.g. the
        in-memory store). Ids are matched on the exact
        ``"<document_id>_chunk_"`` prefix produced by
        :meth:`vectorize_chunks`; a plain substring test would wrongly match
        e.g. document "doc1" against "doc12_chunk_0".
        """
        try:
            if hasattr(self.vector_store, 'vectors'):
                prefix = f"{document_id}_chunk_"
                vector_ids_to_delete = [
                    vid for vid in self.vector_store.vectors.keys()
                    if vid.startswith(prefix)
                ]
                return await self.vector_store.delete_vectors(vector_ids_to_delete)

            logger.warning(f"无法删除文档 {document_id} 的向量：存储类型不支持")
            return False

        except Exception as e:
            logger.error(f"删除文档向量失败: {e}")
            return False

    async def get_stats(self) -> Dict[str, Any]:
        """Return model/storage statistics; {} on failure."""
        try:
            stats = {
                'model_name': self.embedding_model.model_name,
                'vector_dimension': self.embedding_model.get_dimension(),
                'batch_size': self.batch_size
            }

            # In-memory stores additionally expose a live vector count.
            if hasattr(self.vector_store, 'vectors'):
                stats.update({
                    'total_vectors': len(self.vector_store.vectors),
                    'storage_type': 'memory'
                })

            return stats

        except Exception as e:
            logger.error(f"获取统计信息失败: {e}")
            return {}


# 工厂函数
def create_embedding_model(model_type: str = "sentence_transformer", 
                          model_name: Optional[str] = None,
                          **kwargs) -> EmbeddingModel:
    """Factory: build an embedding model by backend type.

    Supported types: 'sentence_transformer', 'openai' (accepts ``api_key``
    kwarg), 'huggingface'. ``model_name`` falls back to a per-type default.
    Re-raises construction errors after logging.
    """
    try:
        if model_type == "sentence_transformer":
            return SentenceTransformerModel(model_name or "all-MiniLM-L6-v2")

        if model_type == "openai":
            return OpenAIEmbeddingModel(
                model_name or "text-embedding-ada-002",
                kwargs.get('api_key')
            )

        if model_type == "huggingface":
            return HuggingFaceModel(model_name or "sentence-transformers/all-MiniLM-L6-v2")

        raise ValueError(f"不支持的模型类型: {model_type}")

    except Exception as e:
        logger.error(f"创建嵌入模型失败: {e}")
        raise


def create_vector_store(store_type: str = "memory", **kwargs) -> VectorStore:
    """Factory: build a vector store by type name.

    Currently only 'memory' is supported; this is the extension point for
    other backends (Faiss, Pinecone, ...). Re-raises errors after logging.
    """
    try:
        if store_type != "memory":
            raise ValueError(f"不支持的存储类型: {store_type}")
        return InMemoryVectorStore()

    except Exception as e:
        logger.error(f"创建向量存储失败: {e}")
        raise