"""
嵌入引擎 (Embedding Engine)
负责调用SiliconFlow API将文本转化为向量表示
"""

import requests
import time
import logging
from typing import List, Dict, Any, Optional
import json

class EmbeddingEngine:
    """
    Embedding engine: converts text into vector representations by calling
    the SiliconFlow embeddings API.

    Long inputs are transparently split into token-bounded chunks; the final
    vector for a long text is the element-wise mean of its chunk embeddings.
    """

    # Conservative token ceilings (well below the API's real limit) so a
    # request never triggers HTTP 413 "payload too large".
    MAX_REQUEST_TOKENS = 4000
    MAX_SAFE_TOKENS = 3500

    def __init__(self, api_key: str, model: str = "BAAI/bge-m3",
                 api_url: str = "https://api.siliconflow.cn/v1/embeddings"):
        """
        Initialize the embedding engine.

        Args:
            api_key: SiliconFlow API key.
            model: Embedding model identifier.
            api_url: Embeddings API endpoint URL.
        """
        self.api_key = api_key
        self.model = model
        self.api_url = api_url
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

        self.logger = logging.getLogger(__name__)

        # API call statistics. Invariant (fixed: the original only counted
        # successes in total_requests):
        #   total_requests == successful_requests + failed_requests
        self.stats = {
            "total_requests": 0,
            "successful_requests": 0,
            "failed_requests": 0,
            "total_tokens": 0
        }

    def _call_api(self, text: str) -> Optional[List[float]]:
        """Send one embedding request and return the vector, or None on failure.

        Every HTTP attempt is counted in ``self.stats``; over-long texts are
        rejected locally without counting as a request.
        """
        # Reject over-long text up front so we never trigger a 413.
        estimated_tokens = self._estimate_token_count(text)
        if estimated_tokens > self.MAX_REQUEST_TOKENS:
            self.logger.error(f"文本过长 (估算{estimated_tokens}个token)，超过安全限制(4000)")
            return None

        # Request payload in the standard embeddings format.
        data = {
            "model": self.model,
            "input": text
        }

        # SECURITY: redact the bearer token so the API key never reaches logs.
        safe_headers = {**self.headers, "Authorization": "Bearer ***"}
        self.logger.debug("🔄 Embedding API请求开始:")
        self.logger.debug(f"   URL: {self.api_url}")
        self.logger.debug(f"   Model: {self.model}")
        self.logger.debug(f"   Input text length: {len(text)}")
        self.logger.debug(f"   Headers: {safe_headers}")
        self.logger.debug(f"   Request data: {json.dumps(data, ensure_ascii=False, indent=2)}")

        # Count the attempt now so failures and successes add up.
        self.stats['total_requests'] += 1
        try:
            self.logger.debug("🚀 发送Embedding API请求...")
            response = requests.post(
                self.api_url,
                headers=self.headers,
                json=data,
                timeout=30
            )

            self.logger.debug("📥 收到Embedding响应:")
            self.logger.debug(f"   Status Code: {response.status_code}")
            self.logger.debug(f"   Headers: {dict(response.headers)}")

            if response.status_code != 200:
                self.logger.error("❌ Embedding API请求失败:")
                self.logger.error(f"   Status Code: {response.status_code}")
                self.logger.error(f"   Response: {response.text}")

                # Map the common failure codes to human-readable hints.
                if response.status_code == 413:
                    self.logger.error("   错误原因: 请求体过大，文本可能超过API限制")
                elif response.status_code == 502:
                    self.logger.error("   错误原因: 网关错误，可能是服务暂时不可用")
                elif response.status_code == 400:
                    self.logger.error("   错误原因: 请求格式错误")

                # Raises HTTPError -> handled by the RequestException branch.
                response.raise_for_status()

            parsed = self._parse_embedding_response(response)
            if parsed is None:
                self.stats['failed_requests'] += 1
                return None
            embedding, total_tokens = parsed

            self.stats['successful_requests'] += 1
            self.stats['total_tokens'] += total_tokens

            self.logger.debug(f"   Token使用量: {total_tokens}")
            self.logger.debug(f"   累计请求数: {self.stats['total_requests']}")
            self.logger.debug(f"   累计Token数: {self.stats['total_tokens']}")

            return embedding

        except requests.exceptions.RequestException as e:
            self.logger.error(f"❌ Embedding API请求异常: {e}")
            self.stats['failed_requests'] += 1
            if getattr(e, 'response', None) is not None:
                self.logger.error(f"   Error Status Code: {e.response.status_code}")
                self.logger.error(f"   Error Response: {e.response.text}")
            return None
        except Exception as e:
            # Safety net for unexpected processing errors.
            self.logger.error(f"❌ Embedding处理响应失败: {e}")
            self.stats['failed_requests'] += 1
            return None

    def _parse_embedding_response(self, response) -> Optional[tuple]:
        """Extract ``(embedding, total_tokens)`` from a 200 response.

        Returns None (after logging) on any malformed payload; never raises
        for format problems, so the caller can account for the failure.
        """
        try:
            response_json = response.json()
            self.logger.debug(f"   Response JSON: {json.dumps(response_json, ensure_ascii=False, indent=2)}")
        except Exception as e:
            self.logger.error(f"❌ 解析响应JSON失败: {e}")
            self.logger.error(f"   Response Text: {response.text}")
            return None

        self.logger.debug("✅ Embedding API请求成功")

        if 'data' not in response_json:
            self.logger.error(f"❌ 响应格式错误，缺少'data'字段: {response_json}")
            return None

        data_items = response_json.get('data', [])
        self.logger.debug(f"   Data items count: {len(data_items)}")

        if not data_items:
            self.logger.error("❌ 响应数据为空")
            return None

        # A single-input request yields exactly one embedding item.
        first_item = data_items[0]
        if 'embedding' not in first_item:
            self.logger.error(f"❌ 数据项缺少'embedding'字段: {first_item}")
            return None

        embedding = first_item.get('embedding', [])
        self.logger.debug(f"   Embedding 维度: {len(embedding)}")

        usage = response_json.get('usage', {})
        return embedding, usage.get('total_tokens', 0)

    def _estimate_token_count(self, text: str) -> int:
        """
        Estimate the token count of *text*.

        Deliberately conservative (CJK characters at 1.5 tokens each, all
        other characters at 1/3.5 tokens, plus a 20% margin) so requests
        stay safely under the API limit and avoid 413 errors.

        Args:
            text: Input text.

        Returns:
            Estimated token count.
        """
        if not text:
            return 0

        # 1. CJK unified ideographs: 1.5 tokens each (some characters may be
        #    decomposed into multiple tokens by the tokenizer).
        chinese_chars = sum(1 for c in text if '\u4e00' <= c <= '\u9fff')
        chinese_tokens = int(chinese_chars * 1.5)

        # 2. Everything else averages ~3.5 characters per token
        #    (accounts for punctuation and special characters).
        non_chinese_chars = len(text) - chinese_chars
        english_tokens = int(non_chinese_chars / 3.5)

        # 3. Add a 20% safety margin on top.
        total_tokens = int((chinese_tokens + english_tokens) * 1.2)

        self.logger.debug(f"Token估算: 中文字符={chinese_chars}({chinese_tokens}tokens), "
                         f"其他字符={non_chinese_chars}({english_tokens}tokens), "
                         f"总计={total_tokens}tokens")

        return total_tokens

    def _split_text_by_tokens(self, text: str, max_tokens: int = 3500) -> List[str]:
        """
        Split *text* into chunks of at most *max_tokens* estimated tokens.

        Strategy: split on paragraphs first, then on sentences, and finally
        force a character-level split for anything still too long.

        Args:
            text: Input text.
            max_tokens: Maximum estimated tokens per chunk (default 3500,
                leaving headroom under the API limit).

        Returns:
            List of text chunks (the whole text if it already fits).
        """
        if self._estimate_token_count(text) <= max_tokens:
            return [text]

        self.logger.info(f"文本过长，开始分块处理，目标每块最大{max_tokens}tokens")

        chunks = []

        # Step 1: split on blank-line paragraph boundaries; fall back to
        # single newlines when the text has no blank lines.
        paragraphs = text.split('\n\n')
        if len(paragraphs) == 1:
            paragraphs = text.split('\n')

        current_chunk = ""

        for paragraph in paragraphs:
            if not paragraph.strip():
                continue

            paragraph = paragraph.strip()
            paragraph_tokens = self._estimate_token_count(paragraph)

            if paragraph_tokens > max_tokens:
                # The paragraph alone exceeds the limit: flush the current
                # chunk, then split the paragraph by sentences.
                if current_chunk:
                    chunks.append(current_chunk.strip())
                    current_chunk = ""

                chunks.extend(self._split_long_paragraph(paragraph, max_tokens))
            else:
                # Greedily pack paragraphs into the current chunk.
                test_chunk = current_chunk + "\n\n" + paragraph if current_chunk else paragraph
                if self._estimate_token_count(test_chunk) <= max_tokens:
                    current_chunk = test_chunk
                else:
                    if current_chunk:
                        chunks.append(current_chunk.strip())
                    current_chunk = paragraph

        if current_chunk:
            chunks.append(current_chunk.strip())

        # Final pass: force-split any chunk that still exceeds the limit.
        validated_chunks = []
        for i, chunk in enumerate(chunks):
            chunk_tokens = self._estimate_token_count(chunk)
            if chunk_tokens > max_tokens:
                self.logger.warning(f"分块{i+1}仍然过长({chunk_tokens}tokens)，进行强制分割")
                validated_chunks.extend(self._force_split_chunk(chunk, max_tokens))
            else:
                validated_chunks.append(chunk)

        self.logger.info(f"文本分块完成，共生成{len(validated_chunks)}个分块")
        for i, chunk in enumerate(validated_chunks):
            tokens = self._estimate_token_count(chunk)
            self.logger.debug(f"分块{i+1}: {tokens}tokens, 长度{len(chunk)}字符")

        return validated_chunks

    def _split_long_paragraph(self, paragraph: str, max_tokens: int) -> List[str]:
        """Split an over-long paragraph into sentence-based chunks."""
        # Split on the first sentence delimiter found in the paragraph.
        sentences = []
        for delimiter in ['。', '！', '？', '.', '!', '?']:
            if delimiter in paragraph:
                parts = paragraph.split(delimiter)
                sentences = [part + delimiter for part in parts[:-1] if part.strip()]
                if parts[-1].strip():  # trailing text without a delimiter
                    sentences.append(parts[-1].strip())
                break

        if not sentences:
            # No sentence delimiters at all: fall back to fixed-size splits.
            return self._force_split_chunk(paragraph, max_tokens)

        chunks = []
        current_chunk = ""

        for sentence in sentences:
            if not sentence.strip():
                continue

            test_chunk = current_chunk + sentence if current_chunk else sentence
            if self._estimate_token_count(test_chunk) <= max_tokens:
                current_chunk = test_chunk
            else:
                if current_chunk:
                    chunks.append(current_chunk.strip())

                # A single sentence may itself exceed the limit.
                if self._estimate_token_count(sentence) > max_tokens:
                    chunks.extend(self._force_split_chunk(sentence, max_tokens))
                    current_chunk = ""
                else:
                    current_chunk = sentence

        if current_chunk:
            chunks.append(current_chunk.strip())

        return chunks

    def _force_split_chunk(self, text: str, max_tokens: int) -> List[str]:
        """Split *text* into fixed-size character windows as a last resort."""
        # Derive characters-per-token from this text's own estimate, then
        # keep a 20% margin.
        estimated_chars_per_token = len(text) / max(self._estimate_token_count(text), 1)
        # Guard against a zero step (would make range() raise ValueError).
        chunk_size = max(int(max_tokens * estimated_chars_per_token * 0.8), 1)

        chunks = []
        for i in range(0, len(text), chunk_size):
            piece = text[i:i + chunk_size]
            if piece.strip():
                chunks.append(piece.strip())

        return chunks

    def embed_text(self, text: str, max_retries: int = 3,
                   retry_delay: float = 1.0) -> Optional[List[float]]:
        """
        Embed a single text, chunking it first if it is too long.

        For chunked input the result is the element-wise mean of the chunk
        embeddings.

        Args:
            text: Text to embed.
            max_retries: Maximum retries per API call.
            retry_delay: Base delay between retries in seconds
                (exponential backoff).

        Returns:
            Embedding vector, or None on failure.
        """
        if not text or not text.strip():
            self.logger.warning("空文本，跳过向量化")
            return None

        text = text.strip()
        estimated_tokens = self._estimate_token_count(text)

        max_safe_tokens = self.MAX_SAFE_TOKENS

        if estimated_tokens <= max_safe_tokens:
            # Fits in a single request.
            self.logger.debug(f"文本长度适中({estimated_tokens} tokens)，直接处理")
            # FIX: forward the retry settings (the original ignored them).
            return self._embed_single_text(text, max_retries, retry_delay)

        self.logger.info(f"文本过长({estimated_tokens} tokens)，进行分块处理")
        chunks = self._split_text_by_tokens(text, max_tokens=max_safe_tokens)

        if not chunks:
            self.logger.error("文本分块失败")
            return None

        self.logger.info(f"文本已分割为{len(chunks)}个分块")

        chunk_embeddings = []
        successful_chunks = 0

        for i, chunk in enumerate(chunks):
            chunk_tokens = self._estimate_token_count(chunk)
            self.logger.debug(f"处理分块 {i+1}/{len(chunks)}，预估{chunk_tokens}tokens")

            # Defensive re-check of the chunk size before sending.
            if chunk_tokens > max_safe_tokens:
                self.logger.warning(f"分块{i+1}仍然过长({chunk_tokens}tokens)，跳过处理")
                continue

            # FIX: forward the retry settings (the original ignored them).
            embedding = self._embed_single_text(chunk, max_retries, retry_delay)
            if embedding is not None:
                chunk_embeddings.append(embedding)
                successful_chunks += 1
                self.logger.debug(f"分块 {i+1} 向量化成功")
            else:
                self.logger.warning(f"分块 {i+1} 向量化失败")

        if not chunk_embeddings:
            self.logger.error("所有分块向量化都失败")
            return None

        # Element-wise mean over the successful chunk embeddings.
        avg_embedding = [sum(values) / len(chunk_embeddings)
                         for values in zip(*chunk_embeddings)]

        success_rate = (successful_chunks / len(chunks)) * 100
        self.logger.info(f"分块处理完成，共{len(chunks)}块，成功{successful_chunks}块，成功率{success_rate:.1f}%")
        return avg_embedding

    def _embed_single_text(self, text: str, max_retries: int = 3,
                          retry_delay: float = 1.0) -> Optional[List[float]]:
        """
        Embed one chunk-sized text with retries (internal helper).

        Args:
            text: Text to embed (must fit in a single request).
            max_retries: Maximum retries after the first attempt.
            retry_delay: Base delay in seconds; doubles each attempt.

        Returns:
            Embedding vector, or None after all retries fail.
        """
        for attempt in range(max_retries + 1):
            try:
                embedding = self._call_api(text)
                if embedding:
                    self.logger.debug(f"成功向量化文本，向量维度: {len(embedding)}")
                    return embedding

                self.logger.error(f"API响应格式异常: 空向量")
                if attempt >= max_retries:
                    return None
                self.logger.info(f"重试 {attempt + 1}/{max_retries}")
                time.sleep(retry_delay * (2 ** attempt))  # exponential backoff

            except Exception as e:
                # _call_api swallows its own errors, so this is a safety net.
                self.logger.warning(f"向量化尝试 {attempt + 1}/{max_retries + 1} 失败: {e}")
                if attempt >= max_retries:
                    self.logger.error(f"向量化失败，已达到最大重试次数: {e}")
                    return None
                time.sleep(retry_delay * (2 ** attempt))  # exponential backoff

        return None

    def embed_batch(self, texts: List[str], batch_size: int = 10,
                    delay_between_batches: float = 0.5) -> List[Optional[List[float]]]:
        """
        Embed a list of texts in rate-limited batches.

        Args:
            texts: Texts to embed.
            batch_size: Number of texts per batch.
            delay_between_batches: Pause between batches in seconds
                (to avoid API rate limiting).

        Returns:
            One entry per input text; failed items are None.
        """
        results = []
        total_batches = (len(texts) + batch_size - 1) // batch_size

        self.logger.info(f"开始批量向量化，共 {len(texts)} 个文本，分 {total_batches} 批处理")

        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]
            batch_num = i // batch_size + 1

            self.logger.info(f"处理第 {batch_num}/{total_batches} 批，包含 {len(batch_texts)} 个文本")

            results.extend(self.embed_text(text) for text in batch_texts)

            # Pause between batches to stay under the API rate limit.
            if i + batch_size < len(texts):
                time.sleep(delay_between_batches)

        success_count = sum(1 for r in results if r is not None)
        self.logger.info(f"批量向量化完成，成功: {success_count}/{len(texts)}")

        return results

    def get_stats(self) -> Dict[str, Any]:
        """
        Return a snapshot of the API call statistics.

        Returns:
            Copy of the counters plus a ``success_rate`` percentage.
        """
        return {
            **self.stats,
            "success_rate": (
                self.stats["successful_requests"] / max(self.stats["total_requests"], 1)
            ) * 100
        }

    def reset_stats(self):
        """Reset all API call counters to zero."""
        self.stats = {
            "total_requests": 0,
            "successful_requests": 0,
            "failed_requests": 0,
            "total_tokens": 0
        }

    def test_connection(self) -> bool:
        """
        Check API connectivity by embedding a tiny probe text.

        Returns:
            True if the probe embedding succeeded.
        """
        test_text = "测试连接"
        return self.embed_text(test_text) is not None


if __name__ == "__main__":
    # Manual smoke test: requires SILICONFLOW_API_KEY in the environment.
    import os
    import sys

    logging.basicConfig(level=logging.INFO)

    # SECURITY FIX: never hard-code an API key fallback in source control;
    # read the key from the environment and fail fast if it is missing.
    api_key = os.getenv("SILICONFLOW_API_KEY")
    if not api_key:
        print("❌ 请设置环境变量 SILICONFLOW_API_KEY")
        sys.exit(1)

    engine = EmbeddingEngine(api_key)

    # Probe the API and report the accumulated call statistics.
    if engine.test_connection():
        print("✅ API连接测试成功")
        print(f"统计信息: {engine.get_stats()}")
    else:
        print("❌ API连接测试失败")