"""
向量化模块 - 使用本地sentence-transformers模型或OpenAI API生成向量嵌入
"""
import os
import logging
import numpy as np
from typing import List, Dict, Any, Optional, Union
from pathlib import Path
import torch
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
import openai
import httpx

from ..config.settings import VECTOR_MODEL_CONFIG

logger = logging.getLogger(__name__)

class Vectorizer:
    """Text embedding generator.

    Depending on ``VECTOR_MODEL_CONFIG["use_openai_api"]`` this either loads a
    local sentence-transformers model or calls an OpenAI-compatible embeddings
    endpoint. The detected embedding dimension is written back into
    ``VECTOR_STORE_CONFIG`` so the vector index is sized consistently.
    """

    # Known embedding dimensions for OpenAI-compatible models, matched by
    # substring in this order (first match wins).
    _OPENAI_DIMENSIONS = (
        ("ada", 1536),
        ("babbage", 2048),
        ("curie", 4096),
        ("davinci", 12288),
        ("qwen3-embedding", 1024),
    )
    _DEFAULT_OPENAI_DIMENSION = 1536

    def __init__(self):
        """Read configuration, initialize the backend and sync the dimension.

        Raises:
            Exception: re-raised (after logging) when configuration keys are
                missing or the model/client cannot be initialized.
        """
        try:
            # Parameters for the local-model backend.
            self.model_name = VECTOR_MODEL_CONFIG["model_name"]
            self.model_path = Path(VECTOR_MODEL_CONFIG["model_path"])
            self.device = VECTOR_MODEL_CONFIG["device"]
            self.max_length = VECTOR_MODEL_CONFIG["max_length"]
            self.batch_size = VECTOR_MODEL_CONFIG["batch_size"]

            # Parameters for the OpenAI-compatible API backend.
            self.use_openai_api = VECTOR_MODEL_CONFIG["use_openai_api"]
            self.openai_api_base = VECTOR_MODEL_CONFIG["openai_api_base"]
            self.openai_api_key = VECTOR_MODEL_CONFIG["openai_api_key"]
            self.openai_model_name = VECTOR_MODEL_CONFIG["openai_model_name"]

            self.model = None
            self.openai_client = None
            self.dimension = 768  # placeholder; _load_model() sets the real value

            self._load_model()

            # Propagate the actual dimension so the vector store is created
            # with a matching index size.
            # NOTE(review): mutating a shared config dict is an implicit
            # cross-module side effect; other modules appear to rely on it.
            from ..config.settings import VECTOR_STORE_CONFIG
            VECTOR_STORE_CONFIG["dimension"] = self.dimension

            logger.info(f"向量化器初始化成功，使用{'OpenAI API' if self.use_openai_api else '本地模型'}，维度: {self.dimension}")

        except Exception as e:
            logger.error(f"向量化器初始化失败: {e}")
            raise

    def _load_model(self):
        """Initialize the configured backend and set ``self.dimension``."""
        try:
            if self.use_openai_api:
                self._init_openai_client()
            else:
                self._init_local_model()
        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            raise

    def _init_openai_client(self):
        """Create the OpenAI client and infer the embedding dimension."""
        logger.info(f"使用OpenAI API进行向量化: {self.openai_api_base}")
        self.openai_client = openai.OpenAI(
            base_url=self.openai_api_base,
            api_key=self.openai_api_key,
            http_client=httpx.Client(timeout=30.0)
        )
        # The embeddings API does not expose the dimension up front, so it is
        # looked up from the configured model name.
        self.dimension = self._DEFAULT_OPENAI_DIMENSION
        for fragment, dimension in self._OPENAI_DIMENSIONS:
            if fragment in self.openai_model_name:
                self.dimension = dimension
                break
        logger.info(f"OpenAI API客户端初始化成功，模型: {self.openai_model_name}，维度: {self.dimension}")

    def _init_local_model(self):
        """Load (or download and cache) the local sentence-transformers model."""
        logger.info(f"正在加载本地向量模型: {self.model_name}")

        if self.model_path.exists():
            logger.info(f"使用本地模型: {self.model_path}")
            # str(): older sentence-transformers releases reject Path objects.
            self.model = SentenceTransformer(str(self.model_path), device=self.device)
        else:
            logger.info(f"从HuggingFace下载模型: {self.model_name}")
            self.model = SentenceTransformer(self.model_name, device=self.device)

            # Cache the downloaded model locally for future runs.
            self.model_path.mkdir(parents=True, exist_ok=True)
            self.model.save(str(self.model_path))
            logger.info(f"模型已保存到: {self.model_path}")

        # Probe with a short text to discover the real embedding dimension.
        test_embedding = self.model.encode("测试文本", convert_to_tensor=True)
        self.dimension = test_embedding.shape[0]

        logger.info(f"本地模型加载成功，向量维度: {self.dimension}")

    def _zero_vectors(self, count: int) -> List[np.ndarray]:
        """Return ``count`` independent zero vectors of the current dimension.

        Independent arrays (not one shared instance, as the old
        ``[np.zeros(d)] * n`` produced) so callers may mutate entries safely.
        """
        return [np.zeros(self.dimension) for _ in range(count)]

    def _filter_texts(self, texts: List[str]):
        """Drop empty entries and truncate over-long ones.

        Returns:
            (valid_texts, valid_indices): the usable (possibly truncated)
            texts and their original positions in ``texts``.
        """
        valid_texts = []
        valid_indices = []
        # max_length is in tokens; ~4 characters per token is a rough bound.
        char_limit = self.max_length * 4
        for i, text in enumerate(texts):
            if text and text.strip():
                if len(text) > char_limit:
                    text = text[:char_limit]
                valid_texts.append(text)
                valid_indices.append(i)
        return valid_texts, valid_indices

    def _batch_iterator(self, total: int, batch_size: int, show_progress: bool):
        """Yield batch start offsets, optionally wrapped in a progress bar."""
        offsets = range(0, total, batch_size)
        if show_progress:
            return tqdm(offsets, desc="生成向量嵌入")
        return offsets

    def _encode_openai(self, valid_texts: List[str], show_progress: bool) -> List[np.ndarray]:
        """Embed texts through the OpenAI-compatible API, 50 per request."""
        embeddings = []
        batch_size = 50  # keep each request within the API input-size limits
        for start in self._batch_iterator(len(valid_texts), batch_size, show_progress):
            batch_texts = valid_texts[start:start + batch_size]
            try:
                response = self.openai_client.embeddings.create(
                    input=batch_texts,
                    model=self.openai_model_name
                )
                embeddings.extend(np.array(item.embedding) for item in response.data)
            except Exception as e:
                logger.error(f"OpenAI API批量向量化失败: {e}")
                # Best effort: pad the failed batch with zero vectors.
                embeddings.extend(self._zero_vectors(len(batch_texts)))
        return embeddings

    def _encode_local(self, valid_texts: List[str], show_progress: bool) -> List[np.ndarray]:
        """Embed texts with the local model using the configured batch size."""
        embeddings = []
        for start in self._batch_iterator(len(valid_texts), self.batch_size, show_progress):
            batch_texts = valid_texts[start:start + self.batch_size]
            embeddings.extend(self.model.encode(batch_texts, convert_to_tensor=False))
        return embeddings

    def encode_text(self, text: str) -> np.ndarray:
        """Encode a single text.

        Args:
            text: input text.

        Returns:
            The embedding vector; a zero vector for empty input or on failure.
        """
        try:
            if not text or not text.strip():
                logger.warning("输入文本为空，返回零向量")
                return np.zeros(self.dimension)

            # Delegate to the batch path so truncation/backend logic is shared.
            embeddings = self.encode_texts([text.strip()], show_progress=False)
            logger.debug(f"文本 '{text[:50]}...' 向量化完成，维度: {len(embeddings[0])}")
            return embeddings[0]

        except Exception as e:
            logger.error(f"文本编码失败: {e}")
            return np.zeros(self.dimension)

    def encode_texts(self, texts: List[str], show_progress: bool = True) -> List[np.ndarray]:
        """Encode a batch of texts.

        Args:
            texts: input texts; empty entries yield zero vectors.
            show_progress: whether to display a progress bar.

        Returns:
            One embedding per input text, in the original order; zero vectors
            stand in for empty inputs and failed batches.
        """
        try:
            if not texts:
                return []

            valid_texts, valid_indices = self._filter_texts(texts)
            if not valid_texts:
                return self._zero_vectors(len(texts))

            if self.use_openai_api:
                embeddings = self._encode_openai(valid_texts, show_progress)
            else:
                embeddings = self._encode_local(valid_texts, show_progress)

            # Scatter the computed embeddings back to their original slots;
            # slots for filtered-out texts keep their zero vectors.
            result = self._zero_vectors(len(texts))
            for i, embedding in zip(valid_indices, embeddings):
                result[i] = embedding
            return result

        except Exception as e:
            logger.error(f"批量文本编码失败: {e}")
            return self._zero_vectors(len(texts))

    def encode_chunks(self, chunks: List[Dict[str, Any]], show_progress: bool = True) -> List[Dict[str, Any]]:
        """Encode text chunks in place.

        Args:
            chunks: chunk dicts; the text is read from each chunk's
                "content" key.
            show_progress: whether to display a progress bar.

        Returns:
            The same chunk list, each chunk augmented with "embedding"
            (plain list), "embedding_dimension" and "embedding_model".
        """
        try:
            if not chunks:
                logger.warning("没有文本块需要编码")
                return []

            logger.info(f"开始编码 {len(chunks)} 个文本块")

            texts = [chunk.get("content", "") for chunk in chunks]

            # Surface data-quality problems early.
            empty_count = sum(1 for text in texts if not text or not text.strip())
            if empty_count > 0:
                logger.warning(f"发现 {empty_count} 个空文本块")

            embeddings = self.encode_texts(texts, show_progress)

            model_label = self.openai_model_name if self.use_openai_api else self.model_name
            for chunk, embedding in zip(chunks, embeddings):
                chunk["embedding"] = embedding.tolist()
                chunk["embedding_dimension"] = self.dimension
                chunk["embedding_model"] = model_label

            logger.info(f"成功编码 {len(chunks)} 个文本块")
            for i, (chunk, embedding) in enumerate(zip(chunks, embeddings)):
                logger.debug(f"向量化结果 {i}: 内容='{chunk['content'][:50]}...', 向量维度={len(embedding)}")

            return chunks

        except Exception as e:
            # Best effort: return the chunks unmodified rather than raising.
            logger.error(f"文本块编码失败: {e}")
            return chunks

    def get_similarity(self, embedding1: np.ndarray, embedding2: np.ndarray) -> float:
        """Compute the cosine similarity of two vectors.

        Args:
            embedding1: first vector.
            embedding2: second vector.

        Returns:
            Cosine similarity in [-1, 1]; 0.0 for zero-norm input or on error.
        """
        try:
            norm1 = np.linalg.norm(embedding1)
            norm2 = np.linalg.norm(embedding2)

            # Zero vectors have no direction; define similarity as 0.
            if norm1 == 0 or norm2 == 0:
                return 0.0

            similarity = np.dot(embedding1, embedding2) / (norm1 * norm2)
            return float(similarity)

        except Exception as e:
            logger.error(f"计算相似度失败: {e}")
            return 0.0

    def get_model_info(self) -> Dict[str, Any]:
        """Return a summary of the active backend configuration."""
        info = {
            "dimension": self.dimension,
            "max_length": self.max_length,
            "batch_size": self.batch_size,
            "use_openai_api": self.use_openai_api,
        }
        if self.use_openai_api:
            info.update(
                model_name=self.openai_model_name,
                model_path=self.openai_api_base,
                device="remote",
            )
        else:
            info.update(
                model_name=self.model_name,
                model_path=self.model_path,
                device=self.device,
            )
        return info

class BatchVectorizer:
    """Applies a :class:`Vectorizer` to document-processing results."""

    def __init__(self, vectorizer: Optional[Vectorizer] = None):
        """Create the batch vectorizer.

        Args:
            vectorizer: vectorizer instance to use; a new one is constructed
                when omitted.
        """
        self.vectorizer = vectorizer or Vectorizer()

    def process_document_chunks(self, document_result: Dict[str, Any]) -> Dict[str, Any]:
        """Vectorize the chunks of one processed document, in place.

        Args:
            document_result: output of the document-processing stage; expected
                keys include "success" and "chunks" (a list of dicts carrying
                a "content" field) — TODO confirm against the producer.

        Returns:
            The same dict, augmented with "vector_count", "embedding_model"
            and "embedding_dimension"; on failure "success" is set to False
            and "error" carries the message.
        """
        try:
            if not document_result.get("success", False):
                logger.warning("文档处理失败，无法进行向量化")
                return document_result

            chunks = document_result.get("chunks", [])
            if not chunks:
                logger.warning("没有分块需要向量化")
                return document_result

            logger.info(f"开始处理文档分块向量化，共 {len(chunks)} 个分块")

            vectorized_chunks = self.vectorizer.encode_chunks(chunks)

            document_result["chunks"] = vectorized_chunks
            document_result["vector_count"] = len(vectorized_chunks)
            # BUGFIX: previously this always reported the local model name,
            # disagreeing with the per-chunk "embedding_model" written by
            # encode_chunks() when the OpenAI backend is active.
            document_result["embedding_model"] = (
                self.vectorizer.openai_model_name
                if self.vectorizer.use_openai_api
                else self.vectorizer.model_name
            )
            document_result["embedding_dimension"] = self.vectorizer.dimension

            logger.info(f"文档向量化完成，共处理 {len(vectorized_chunks)} 个分块")
            return document_result

        except Exception as e:
            error_msg = f"文档向量化失败: {e}"
            logger.error(error_msg)
            document_result["success"] = False
            document_result["error"] = error_msg
            return document_result

    def batch_process_documents(self, document_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Vectorize a list of document results, one by one.

        Args:
            document_results: document processing results.

        Returns:
            The results with embeddings attached; entries that failed are
            marked success=False with an "error" message.
        """
        results = []
        for doc_result in document_results:
            try:
                results.append(self.process_document_chunks(doc_result))
            except Exception as e:
                logger.error(f"批量向量化失败: {e}")
                doc_result["success"] = False
                doc_result["error"] = str(e)
                results.append(doc_result)
        return results

# Global singleton instances shared across the package.
# NOTE(review): constructing Vectorizer() here loads the embedding model (or
# builds the API client) as an import-time side effect, so importing this
# module can be slow and may raise if configuration is missing or invalid.
vectorizer = Vectorizer()
batch_vectorizer = BatchVectorizer(vectorizer)
