"""
嵌入生成器 - 生成文档和查询的向量嵌入
支持多种嵌入模型和优化策略
"""

import asyncio
from typing import List, Dict, Any, Optional, Union
import numpy as np
from sentence_transformers import SentenceTransformer
import openai
from openai import AsyncOpenAI

from ...core.events import EventEmitter
from ...core.config import config


class EmbeddingGenerator(EventEmitter):
    """Embedding generator supporting multiple backend providers.

    Two providers are supported, selected via the
    ``lightrag_engine.embedding`` config section:

    - ``"openai"``: remote embeddings via the async OpenAI client.
    - ``"sentence_transformers"``: a local SentenceTransformers model
      (encoding is dispatched to a thread pool so the event loop is
      never blocked).

    Progress and lifecycle notifications are published through the
    inherited :class:`EventEmitter` interface.
    """

    def __init__(self):
        super().__init__()
        self.model = None           # SentenceTransformer instance (local provider)
        self.openai_client = None   # AsyncOpenAI client (openai provider)
        self.provider = None        # "openai" | "sentence_transformers"
        self.model_name = None      # model identifier for the active provider
        self._initialized = False   # guards against double initialization

    async def initialize(self):
        """Initialize the embedding backend from configuration.

        Idempotent: once initialization has succeeded, further calls
        are no-ops.

        Raises:
            ValueError: if the configured provider is unsupported, or
                the OpenAI API key is missing for the ``openai`` provider.
        """
        if self._initialized:
            return

        try:
            # Pull provider/model settings from the global config.
            embedding_config = config.get("lightrag_engine.embedding", {})
            self.provider = embedding_config.get("provider", "sentence_transformers")
            self.model_name = embedding_config.get("model", "BAAI/bge-base-zh-v1.5")

            if self.provider == "openai":
                # Remote embeddings via OpenAI's API.
                api_key = config.get("external_services.openai.api_key")
                if not api_key:
                    raise ValueError("OpenAI API key not found")
                self.openai_client = AsyncOpenAI(api_key=api_key)

            elif self.provider == "sentence_transformers":
                # Local model; loading may download weights on first use.
                self.model = SentenceTransformer(self.model_name)

            else:
                raise ValueError(f"Unsupported embedding provider: {self.provider}")

            self._initialized = True

            await self.emit("embedding_generator_initialized", {
                "provider": self.provider,
                "model": self.model_name
            })

        except Exception as e:
            await self.emit_error("embedding_generator_initialization", e)
            raise

    async def generate_embedding(self, text: str) -> List[float]:
        """Generate the embedding vector for a single text.

        Lazily initializes the backend on first use.

        Raises:
            ValueError: if the provider is unsupported.
        """
        if not self._initialized:
            await self.initialize()

        try:
            if self.provider == "openai":
                return await self._generate_openai_embedding(text)
            elif self.provider == "sentence_transformers":
                return await self._generate_st_embedding(text)
            else:
                raise ValueError(f"Unsupported provider: {self.provider}")

        except Exception as e:
            await self.emit_error("generate_embedding", e)
            raise

    async def generate_batch_embeddings(
        self,
        texts: List[str],
        batch_size: int = 32
    ) -> List[List[float]]:
        """Generate embeddings for many texts in batches.

        Emits a ``batch_embedding_progress`` event after each batch and
        a ``batch_embeddings_generated`` event on completion.

        Args:
            texts: Input texts to embed.
            batch_size: Number of texts per provider request.

        Returns:
            One embedding vector per input text, in input order.
        """
        if not self._initialized:
            await self.initialize()

        try:
            all_embeddings = []

            # Process in slices to bound request size / memory.
            for i in range(0, len(texts), batch_size):
                batch = texts[i:i + batch_size]

                if self.provider == "openai":
                    batch_embeddings = await self._generate_openai_batch_embeddings(batch)
                elif self.provider == "sentence_transformers":
                    batch_embeddings = await self._generate_st_batch_embeddings(batch)
                else:
                    raise ValueError(f"Unsupported provider: {self.provider}")

                all_embeddings.extend(batch_embeddings)

                # Progress notification for long-running jobs.
                await self.emit("batch_embedding_progress", {
                    "completed": len(all_embeddings),
                    "total": len(texts),
                    "batch_size": len(batch)
                })

            await self.emit("batch_embeddings_generated", {
                "total_texts": len(texts),
                "embedding_dimension": len(all_embeddings[0]) if all_embeddings else 0
            })

            return all_embeddings

        except Exception as e:
            await self.emit_error("generate_batch_embeddings", e)
            raise

    async def _generate_openai_embedding(self, text: str) -> List[float]:
        """Generate one embedding via the OpenAI embeddings endpoint."""
        try:
            response = await self.openai_client.embeddings.create(
                model=self.model_name,
                input=text
            )
            return response.data[0].embedding

        except Exception as e:
            await self.emit_error("openai_embedding", e)
            raise

    async def _generate_openai_batch_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Generate a batch of embeddings in one OpenAI API call."""
        try:
            response = await self.openai_client.embeddings.create(
                model=self.model_name,
                input=texts
            )
            return [item.embedding for item in response.data]

        except Exception as e:
            await self.emit_error("openai_batch_embedding", e)
            raise

    async def _generate_st_embedding(self, text: str) -> List[float]:
        """Generate one embedding with the local SentenceTransformers model."""
        try:
            # encode() is CPU/GPU-bound and synchronous; run it in the
            # default executor so the event loop stays responsive.
            # (get_running_loop is the non-deprecated form inside a coroutine.)
            loop = asyncio.get_running_loop()
            embedding = await loop.run_in_executor(
                None,
                self.model.encode,
                text
            )
            return embedding.tolist()

        except Exception as e:
            await self.emit_error("st_embedding", e)
            raise

    async def _generate_st_batch_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Generate a batch of embeddings with the local model."""
        try:
            # Same executor trick as the single-text path; encode() accepts
            # a list and returns a 2-D array.
            loop = asyncio.get_running_loop()
            embeddings = await loop.run_in_executor(
                None,
                self.model.encode,
                texts
            )
            return embeddings.tolist()

        except Exception as e:
            await self.emit_error("st_batch_embedding", e)
            raise

    async def generate_query_embedding(self, query: str) -> List[float]:
        """Generate the embedding for a search query.

        Some models embed queries differently from documents; for now
        queries use the same path as documents (future optimization).
        """
        return await self.generate_embedding(query)

    async def compute_similarity(
        self,
        embedding1: List[float],
        embedding2: List[float]
    ) -> float:
        """Compute the cosine similarity of two embedding vectors.

        Returns:
            Cosine similarity in [-1, 1]; 0.0 for zero-norm vectors or
            on error (best-effort: errors are reported, not raised).
        """
        try:
            vec1 = np.array(embedding1)
            vec2 = np.array(embedding2)

            dot_product = np.dot(vec1, vec2)
            norm1 = np.linalg.norm(vec1)
            norm2 = np.linalg.norm(vec2)

            # Zero vectors have no direction; define similarity as 0.
            if norm1 == 0 or norm2 == 0:
                return 0.0

            similarity = dot_product / (norm1 * norm2)
            return float(similarity)

        except Exception as e:
            await self.emit_error("compute_similarity", e)
            return 0.0

    async def find_most_similar(
        self,
        query_embedding: List[float],
        candidate_embeddings: List[List[float]],
        top_k: int = 5
    ) -> List[Dict[str, Any]]:
        """Rank candidate embeddings by similarity to a query embedding.

        Returns:
            Up to ``top_k`` dicts ``{"index": i, "similarity": s}``
            sorted by descending similarity; empty list on error
            (best-effort: errors are reported, not raised).
        """
        try:
            similarities = []

            for i, candidate in enumerate(candidate_embeddings):
                similarity = await self.compute_similarity(query_embedding, candidate)
                similarities.append({
                    "index": i,
                    "similarity": similarity
                })

            similarities.sort(key=lambda x: x["similarity"], reverse=True)

            return similarities[:top_k]

        except Exception as e:
            await self.emit_error("find_most_similar", e)
            return []

    def get_embedding_dimension(self) -> int:
        """Return the embedding dimension for the active model (0 if unknown)."""
        if not self._initialized:
            return 0

        if self.provider == "sentence_transformers" and self.model:
            return self.model.get_sentence_embedding_dimension()
        elif self.provider == "openai":
            # Known dimensions per OpenAI embedding model; default to 1536.
            model_dimensions = {
                "text-embedding-ada-002": 1536,
                "text-embedding-3-small": 1536,
                "text-embedding-3-large": 3072
            }
            return model_dimensions.get(self.model_name, 1536)

        return 0

    async def optimize_text_for_embedding(self, text: str, max_length: int = 512) -> str:
        """Clean and truncate text to improve embedding quality.

        Collapses whitespace, then truncates to at most ``max_length``
        characters, preferring a cut at a sentence boundary ('。').

        Returns:
            The optimized text; on error the input is returned unchanged
            (best-effort: errors are reported, not raised).
        """
        try:
            # Trim and collapse runs of whitespace to single spaces.
            text = ' '.join(text.strip().split())

            if len(text) > max_length:
                # Prefer truncating at sentence boundaries.
                sentences = text.split('。')
                truncated = ""
                for sentence in sentences:
                    candidate = truncated + sentence + '。'
                    if len(candidate) <= max_length:
                        truncated = candidate
                    else:
                        break

                if truncated:
                    text = truncated
                else:
                    # No sentence fits: hard-truncate, reserving room for the
                    # ellipsis so the result never exceeds max_length
                    # (the previous version returned max_length + 3 chars).
                    text = text[:max(max_length - 3, 0)] + "..."

            return text

        except Exception as e:
            await self.emit_error("optimize_text_for_embedding", e)
            return text

    async def generate_weighted_embedding(
        self,
        texts: List[str],
        weights: List[float]
    ) -> List[float]:
        """Generate the weighted average of the embeddings of ``texts``.

        Args:
            texts: Texts to embed (must be non-empty).
            weights: One weight per text; the sum must be non-zero.

        Returns:
            The weighted-average embedding vector.

        Raises:
            ValueError: on length mismatch, empty input, or zero total
                weight (previously these surfaced as IndexError /
                ZeroDivisionError).
        """
        try:
            if len(texts) != len(weights):
                raise ValueError("Texts and weights must have the same length")
            if not texts:
                raise ValueError("At least one text is required")

            total_weight = sum(weights)
            if total_weight == 0:
                raise ValueError("Sum of weights must be non-zero")

            embeddings = await self.generate_batch_embeddings(texts)

            # Accumulate the weighted sum, then normalize by total weight.
            weighted_embedding = np.zeros(len(embeddings[0]))
            for embedding, weight in zip(embeddings, weights):
                weighted_embedding += np.array(embedding) * weight

            weighted_embedding = weighted_embedding / total_weight

            return weighted_embedding.tolist()

        except Exception as e:
            await self.emit_error("generate_weighted_embedding", e)
            raise