"""
翻译服务模块
支持多种翻译服务：百度翻译、DeeplX、AI翻译
"""
import hashlib
import html
import json
import os
import random
import re
import time
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Optional, List, Dict, Tuple

import requests
from lxml import etree, html as lxml_html
from sqlalchemy.orm import Session

from api.utils.logger import get_logger
from api.utils.cache import cache
from api.model.translation_config import TranslationConfig

# 初始化日志
logger = get_logger("translation")

class BaseTranslator(ABC):
    """Common interface implemented by every concrete translator backend."""

    @abstractmethod
    def translate_batch(self, texts: List[str], max_retries: int = 2, retry_delay: int = 2) -> List[Optional[str]]:
        """
        Translate a batch of texts, retrying failed requests.

        Args:
            texts: source strings to translate
            max_retries: maximum number of retry attempts per request
            retry_delay: delay between retries, in seconds

        Returns:
            Translated strings aligned one-to-one with ``texts``
            (``None`` entries mark segments that failed to translate).
        """
        ...

class BaiduTranslator(BaseTranslator):
    """Baidu Fanyi translator.

    Splits the input into size-bounded batches, translates the batches
    concurrently, and maps each translation back to its original position.
    """

    def __init__(self, app_id: str, secret_key: str):
        self.app_id = app_id
        self.secret_key = secret_key
        self.api_url = "https://fanyi-api.baidu.com/api/trans/vip/translate"
        self.max_chars_per_request = 6000  # Baidu API limit on a single `q` payload (UTF-8 bytes)

    def translate_batch(self, texts: List[str], max_retries: int = 2, retry_delay: int = 2) -> List[Optional[str]]:
        """Translate a list of texts with batching and retries.

        Args:
            texts: source strings to translate
            max_retries: maximum retry attempts per batch
            retry_delay: seconds to wait between retries

        Returns:
            Translations aligned one-to-one with ``texts``; entries whose
            batch failed remain ``None``.
        """
        if not texts:
            return []

        results: List[Optional[str]] = [None] * len(texts)

        # Group consecutive texts into size-limited batches
        batches = self._split_into_batches(texts)

        # Translate batches concurrently; each batch yields (index, translation) pairs
        with ThreadPoolExecutor(max_workers=3) as executor:
            future_to_batch = {
                executor.submit(self._translate_single_batch, batch_texts, batch_indices, max_retries, retry_delay): (batch_texts, batch_indices)
                for batch_texts, batch_indices in batches
            }

            for future in as_completed(future_to_batch):
                batch_results = future.result()
                if batch_results:
                    for idx, translated_text in batch_results:
                        results[idx] = translated_text

        return results

    def _split_into_batches(self, texts: List[str]) -> List[Tuple[List[str], List[int]]]:
        """Split texts into batches that respect the per-request size limit.

        Texts are merged front-to-back so each batch packs as many consecutive
        segments as possible, preserving semantic continuity for translation.
        """
        batches: List[Tuple[List[str], List[int]]] = []
        current_batch: List[str] = []
        current_indices: List[int] = []
        current_size = 0

        for idx, text in enumerate(texts):
            text_size = len(text.encode('utf-8'))

            # An oversized text becomes its own batch so nothing is dropped
            # (the API is left to truncate it server-side if needed)
            if text_size > self.max_chars_per_request:
                # Flush the in-progress batch first
                if current_batch:
                    batches.append((current_batch, current_indices))
                    current_batch = []
                    current_indices = []
                    current_size = 0

                batches.append(([text], [idx]))
                logger.warning(f"文本 {idx} 超过{self.max_chars_per_request}字符，单独处理")
                continue

            # Projected size if this text joins the batch (+1 for the newline separator)
            new_size = current_size + text_size + (1 if current_batch else 0)

            # If it would overflow, flush the current batch and start a new one
            if new_size > self.max_chars_per_request and current_batch:
                batches.append((current_batch, current_indices))
                current_batch = []
                current_indices = []
                current_size = 0

            current_batch.append(text)
            current_indices.append(idx)
            current_size = current_size + text_size + (1 if len(current_batch) > 1 else 0)

        # Flush the final batch
        if current_batch:
            batches.append((current_batch, current_indices))

        return batches

    def _translate_single_batch(self, texts: List[str], indices: List[int], max_retries: int, retry_delay: int) -> List[Tuple[int, str]]:
        """Translate one batch with retries.

        Returns:
            (original_index, translated_text) pairs, or ``[]`` on failure.
        """
        for attempt in range(max_retries + 1):
            try:
                # Segments are joined with '\n'; Baidu returns one trans_result
                # entry per input line. [fix] Replace embedded newlines inside
                # each segment first, so multi-line source text cannot corrupt
                # the one-line-per-segment protocol.
                combined_text = "\n".join(seg.replace('\r', ' ').replace('\n', ' ') for seg in texts)

                # Sign the request per Baidu's MD5(appid + q + salt + key) scheme
                salt = str(random.randint(32768, 65536))
                sign_str = f"{self.app_id}{combined_text}{salt}{self.secret_key}"
                sign = hashlib.md5(sign_str.encode('utf-8')).hexdigest()

                # Form-encoded POST body
                data = {
                    'q': combined_text,
                    'from': 'auto',
                    'to': 'zh',
                    'appid': self.app_id,
                    'salt': salt,
                    'sign': sign
                }

                headers = {'Content-Type': 'application/x-www-form-urlencoded'}

                response = requests.post(self.api_url, data=data, headers=headers, timeout=30)
                response.raise_for_status()
                result = response.json()

                # Baidu signals errors in-band via error_code
                if 'error_code' in result:
                    error_msg = f"{result.get('error_code')} - {result.get('error_msg', '未知错误')}"
                    logger.error(f"百度翻译API错误: {error_msg}")

                    if attempt < max_retries:
                        time.sleep(retry_delay)
                        continue
                    return []

                if 'trans_result' in result:
                    translated_texts = [item['dst'] for item in result['trans_result']]
                    # [fix] zip() silently truncates on a length mismatch, which
                    # would misalign translations with their source indices;
                    # treat a segment-count mismatch as a hard failure instead.
                    if len(translated_texts) != len(texts):
                        logger.error(f"百度翻译结果数量不匹配：期望 {len(texts)} 段，实际得到 {len(translated_texts)} 段")
                        return []
                    return list(zip(indices, translated_texts))

                return []

            except Exception as e:
                logger.error(f"翻译批次失败 (尝试 {attempt + 1}/{max_retries + 1}): {e}")
                if attempt < max_retries:
                    time.sleep(retry_delay)
                else:
                    return []

        return []


class DeeplXTranslator(BaseTranslator):
    """DeeplX translator using a newline-joined batch protocol."""

    def __init__(self, api_url: str):
        self.api_url = api_url
        self.max_chars_per_request = 2000  # DeeplX API payload limit

    def translate_batch(self, texts: List[str], max_retries: int = 2, retry_delay: int = 2) -> List[Optional[str]]:
        """Translate a list of texts with batching and retries.

        Returns translations aligned one-to-one with ``texts``; entries whose
        batch failed remain ``None``.
        """
        if not texts:
            return []

        results: List[Optional[str]] = [None] * len(texts)

        # Group consecutive texts into size-limited batches
        batches = self._split_into_batches(texts)

        # Translate batches concurrently; each batch yields (index, translation) pairs
        with ThreadPoolExecutor(max_workers=3) as executor:
            future_to_batch = {
                executor.submit(self._translate_single_batch, batch_texts, batch_indices, max_retries, retry_delay): (batch_texts, batch_indices)
                for batch_texts, batch_indices in batches
            }

            for future in as_completed(future_to_batch):
                batch_results = future.result()
                if batch_results:
                    for idx, translated_text in batch_results:
                        results[idx] = translated_text

        return results

    def _split_into_batches(self, texts: List[str]) -> List[Tuple[List[str], List[int]]]:
        """Split texts into batches that respect the per-request size limit,
        merging front-to-back so each batch packs as many segments as possible.
        """
        batches: List[Tuple[List[str], List[int]]] = []
        current_batch: List[str] = []
        current_indices: List[int] = []
        current_size = 0

        for idx, text in enumerate(texts):
            text_size = len(text.encode('utf-8'))

            # An oversized text becomes its own batch so nothing is dropped
            if text_size > self.max_chars_per_request:
                # Flush the in-progress batch first
                if current_batch:
                    batches.append((current_batch, current_indices))
                    current_batch = []
                    current_indices = []
                    current_size = 0

                batches.append(([text], [idx]))
                logger.warning(f"文本 {idx} 超过{self.max_chars_per_request}字符，单独处理")
                continue

            # Projected size if this text joins the batch (+1 for the newline separator)
            new_size = current_size + text_size + (1 if current_batch else 0)

            # If it would overflow, flush the current batch and start a new one
            if new_size > self.max_chars_per_request and current_batch:
                batches.append((current_batch, current_indices))
                current_batch = []
                current_indices = []
                current_size = 0

            current_batch.append(text)
            current_indices.append(idx)
            current_size = current_size + text_size + (1 if len(current_batch) > 1 else 0)

        # Flush the final batch
        if current_batch:
            batches.append((current_batch, current_indices))

        return batches

    def _translate_single_batch(self, texts: List[str], indices: List[int], max_retries: int, retry_delay: int) -> List[Tuple[int, str]]:
        """Translate one batch with retries.

        Returns:
            (original_index, translated_text) pairs, or ``[]`` on failure.
        """
        for attempt in range(max_retries + 1):
            try:
                # Segments are joined with '\n' and the response is split on
                # '\n' again. [fix] Replace embedded newlines inside each
                # segment first; otherwise any multi-line source text corrupts
                # the line protocol and the count check below always fails.
                combined_text = "\n".join(seg.replace('\r', ' ').replace('\n', ' ') for seg in texts)

                payload = {
                    "text": combined_text,
                    "source_lang": "AUTO",
                    "target_lang": "ZH"
                }

                response = requests.post(
                    self.api_url,
                    json=payload,
                    headers={'Content-Type': 'application/json'},
                    timeout=30
                )
                response.raise_for_status()
                result = response.json()

                if result.get('code') == 200 and 'data' in result:
                    translated_combined = result['data']

                    # Split the combined translation back into segments
                    translated_parts = translated_combined.split('\n')

                    # The segment count must match exactly, or results would
                    # be misaligned with their source indices
                    if len(translated_parts) != len(texts):
                        logger.error(f"DeeplX翻译结果拆分数量不匹配：期望 {len(texts)} 段，实际得到 {len(translated_parts)} 段")
                        # Fail outright rather than degrade, so a bad result is never cached
                        return []

                    return list(zip(indices, translated_parts))

                if attempt < max_retries:
                    time.sleep(retry_delay)

            except Exception as e:
                logger.error(f"DeeplX翻译批次失败 (尝试 {attempt + 1}/{max_retries + 1}): {e}")
                if attempt < max_retries:
                    time.sleep(retry_delay)

        return []

    def _translate_single_text(self, text: str, max_retries: int, retry_delay: int) -> Optional[str]:
        """Translate a single text (fallback path), or return ``None`` on failure."""
        for attempt in range(max_retries + 1):
            try:
                payload = {
                    "text": text,
                    "source_lang": "AUTO",
                    "target_lang": "ZH"
                }

                response = requests.post(
                    self.api_url,
                    json=payload,
                    headers={'Content-Type': 'application/json'},
                    timeout=10
                )
                response.raise_for_status()
                result = response.json()

                if result.get('code') == 200 and 'data' in result:
                    return result['data']

                if attempt < max_retries:
                    time.sleep(retry_delay)

            except Exception as e:
                logger.error(f"DeeplX单个文本翻译失败 (尝试 {attempt + 1}/{max_retries + 1}): {e}")
                if attempt < max_retries:
                    time.sleep(retry_delay)

        return None

class AITranslator(BaseTranslator):
    """AI translator (OpenAI-compatible chat API) using a JSON key/value protocol with multi-threaded batching."""
    
    def __init__(self, api_url: str, api_key: str, model: str, prompt: Optional[str] = None):
        self.api_url = self._process_api_url(api_url)
        self.api_key = api_key
        self.model = model
        self.custom_prompt = prompt  # user-supplied custom prompt template (optional)
        self.target_language = "Chinese"  # target language for all translations
        self.max_chars_per_request = 1500  # AI request budget: at most ~1500 chars per batch
        
        # Default JSON-mode translation prompt (used when no custom prompt is configured)
        self.TRANSLATION_PROMPT_TEMPLATE = """You are a JSON translation service.
            Your ONLY task is to translate the string values in the JSON object provided in <translate_input> into {target_language}.

            **CRITICAL RULES:**
            1.  The input is a JSON object. Keys are segment IDs (e.g., "s0", "s1"). Values are text.
            2.  Your output MUST be *ONLY* a valid JSON object. Do not use Markdown (```json).
            3.  **DO NOT TRANSLATE KEYS:** The output JSON MUST contain the *exact same keys* as the input.
            4.  **STRICT 1:1 MAPPING (CRITICAL):**
                * Translate the *value* for each key independently.
                * The translation for the value of "s1" MUST go into the output value for "s1".
                * The translation for "s2" MUST go into "s2", and so on.
                * **DO NOT MERGE ACROSS KEYS.** Never put the translation of "s9" into the value of "s8". Each key is a separate task.
            5.  **DO NOT DROP KEYS (CRITICAL):**
                * If the input JSON has 20 keys (from "s0" to "s19"), the output JSON MUST also have all 20 keys (from "s0" to "s19").
                * You MUST process *all* keys, including the last one.
            6.  **FRAGMENT RULE:** If an input value is a fragment (e.g., ", or"), its translation MUST also be a fragment (e.g., "，或"). Do not try to make it grammatically complete by merging it with other keys.
            7.  **NO EXTRA TEXT:** Do not include any explanations.

            <translate_input>
            {text_json}
            </translate_input>

            Translate the *values* in the JSON above into {target_language} and return *only* the new JSON object. You MUST follow all CRITICAL RULES, especially 4, 5, and 6.
        """
    
    def _process_api_url(self, url: str) -> str:
        """
        Normalize the API URL by appending the proper suffix.
        Rules:
        1. Trailing '#': strip the '#' and use the address verbatim (forced override)
        2. Trailing '/': strip the '/' and skip the v1 segment
        3. Trailing '/v1': do not append another v1
        4. Otherwise: append '/v1'
        5. '/chat/completions' is appended in cases 2-4
        """
        url = url.strip()
        
        # Rule 1: trailing '#' forces the input address as-is
        if url.endswith('#'):
            return url[:-1]  # strip the '#' and return directly
        
        # Rule 2: trailing '/', skip the v1 segment
        if url.endswith('/'):
            url = url.rstrip('/')
            return f"{url}/chat/completions"
        
        # Rule 3: already ends with '/v1'; don't add another
        if url.endswith('/v1'):
            return f"{url}/chat/completions"
        
        # Rule 4: default case, insert '/v1'
        return f"{url}/v1/chat/completions"
    
    def translate_batch(self, texts: List[str], max_retries: int = 2, retry_delay: int = 2) -> List[Optional[str]]:
        """Translate a list of texts with batching and multi-threading.

        Returns translations aligned one-to-one with ``texts``; entries whose
        batch failed remain ``None``.
        """
        if not texts:
            return []
        
        results = [None] * len(texts)
        
        # Group consecutive texts into size-limited batches
        batches = self._split_into_batches(texts)
        
        # Translate batches concurrently; each batch yields (index, translation) pairs
        with ThreadPoolExecutor(max_workers=3) as executor:
            future_to_batch = {
                executor.submit(self._translate_single_batch, batch_texts, batch_indices, max_retries, retry_delay): (batch_texts, batch_indices)
                for batch_texts, batch_indices in batches
            }
            
            for future in as_completed(future_to_batch):
                batch_results = future.result()
                if batch_results:
                    for idx, translated_text in batch_results:
                        results[idx] = translated_text
        
        return results
    
    def _split_into_batches(self, texts: List[str]) -> List[Tuple[List[str], List[int]]]:
        """
        Split texts into batches that respect the per-request size limit,
        merging front-to-back so each batch packs as many segments as possible.
        Accounts for JSON formatting overhead (key names, quotes, commas, etc.).
        """
        batches = []
        current_batch = []
        current_indices = []
        current_size = 0
        
        # JSON overhead estimate: {"s0":"text"} is roughly 10 chars + key length + value length
        json_overhead_per_item = 15  # conservative per-entry JSON formatting overhead
        
        for idx, text in enumerate(texts):
            text_size = len(text.encode('utf-8'))
            # Estimated size of this text once embedded in the JSON payload
            estimated_json_size = text_size + json_overhead_per_item + len(str(idx))
            
            # An oversized text becomes its own batch
            if estimated_json_size > self.max_chars_per_request:
                # Flush the in-progress batch first
                if current_batch:
                    batches.append((current_batch, current_indices))
                    current_batch = []
                    current_indices = []
                    current_size = 0
                
                batches.append(([text], [idx]))
                logger.warning(f"文本 {idx} 超过{self.max_chars_per_request}字符，单独处理")
                continue
            
            # Projected size if this text joins the batch
            new_size = current_size + estimated_json_size + (1 if current_batch else 0)  # +1 for the comma
            
            # If it would overflow, flush the current batch and start a new one
            if new_size > self.max_chars_per_request and current_batch:
                batches.append((current_batch, current_indices))
                current_batch = []
                current_indices = []
                current_size = 0
            
            current_batch.append(text)
            current_indices.append(idx)
            current_size = current_size + estimated_json_size + (1 if len(current_batch) > 1 else 0)
        
        # Flush the final batch
        if current_batch:
            batches.append((current_batch, current_indices))
        
        return batches
    
    def _translate_single_batch(self, texts: List[str], indices: List[int], max_retries: int, retry_delay: int) -> List[Tuple[int, str]]:
        """Translate one batch with retries, using the JSON-mapping protocol.

        Returns:
            (original_index, translated_text) pairs, or ``[]`` on failure.
        """
        
        # Step 1: build the input as a JSON object.
        # Keys "s0", "s1", "s2"... preserve segment order.
        input_dict = {f"s{i}": text for i, text in enumerate(texts)}
        try:
            combined_text_json = json.dumps(input_dict, ensure_ascii=False)
        except Exception as e:
            error_msg = f"无法创建输入JSON: {e}"
            logger.error(f"[AI翻译] {error_msg}")
            # An empty list signals translation failure
            return []

        for attempt in range(max_retries + 1):
            try:
                # Use the custom prompt if configured (non-empty), otherwise the default template
                if self.custom_prompt and self.custom_prompt.strip():
                    # Custom prompt: substitute placeholders manually
                    prompt_content = self.custom_prompt.replace('{text_json}', combined_text_json).replace('{target_language}', self.target_language)
                else:
                    # Default template: substitute via str.format
                    prompt_content = self.TRANSLATION_PROMPT_TEMPLATE.format(
                        target_language=self.target_language,
                        text_json=combined_text_json
                    )
                
                messages = [
                    {"role": "user", "content": prompt_content}
                ]
                
                payload = {
                    "model": self.model,
                    "messages": messages,
                    "temperature": 0.3  # lower temperature (0.1-0.3) is more stable for JSON output
                }
                
                response = requests.post(
                    self.api_url,
                    json=payload,
                    headers={
                        'Content-Type': 'application/json',
                        'Authorization': f'Bearer {self.api_key}'
                    },
                    timeout=60
                )
                response.raise_for_status()
                result = response.json()
                
                if 'choices' in result and len(result['choices']) > 0:
                    translated_combined = result['choices'][0]['message']['content'].strip()
                    
                    # Strip wrapper markup the model may add (e.g. ```json ... ```)
                    translated_combined = self._clean_translation_result(translated_combined)
                    
                    # Step 2: parse the model output as a JSON object
                    try:
                        # Robust JSON extraction: take the span from the first '{' to the last '}'
                        start_index = translated_combined.find('{')
                        end_index = translated_combined.rfind('}')
                        if start_index == -1 or end_index == -1 or end_index < start_index:
                            raise json.JSONDecodeError("Valid JSON object not found", translated_combined, 0)
                        
                        json_string = translated_combined[start_index : end_index + 1]
                        parsed_json = json.loads(json_string)
                        
                    except Exception as json_err:
                        error_msg = f"无法解析AI返回的JSON: {json_err} - 内容: {translated_combined}"
                        logger.error(f"[AI翻译] {error_msg}")
                        # Parse failure: retry if attempts remain
                        if attempt < max_retries:
                            time.sleep(retry_delay)
                            continue
                        else:
                            # Final attempt also failed; give up on this batch
                            return []

                    # Step 3: verify the key set and entry count match the input
                    if len(parsed_json) != len(texts) or set(parsed_json.keys()) != set(input_dict.keys()):
                        logger.error(f"【预期：{len(texts)}/{len(parsed_json)} 模型：{self.model}】【原文】 {combined_text_json}\n【译文】 {json_string}")
                        # Fail immediately (no retry) so a bad result is never cached
                        return []

                    # Step 4: extract translations in the original input-key order
                    translated_parts = [parsed_json[key] for key in input_dict.keys()]
                    
                    # Success
                    return list(zip(indices, translated_parts))
                
                # The AI returned an empty choices array
                error_msg = "AI响应无choices"
                logger.error(f"[AI翻译] {error_msg}")
                if attempt < max_retries:
                    time.sleep(retry_delay)
                    
            except Exception as e:
                error_msg = f"批次翻译失败: {e}"
                logger.error(f"[AI翻译（{self.model}）] {error_msg} (尝试 {attempt + 1}/{max_retries + 1})")
                if attempt < max_retries:
                    time.sleep(retry_delay)

        # All retries exhausted
        logger.error(f"[AI翻译] 批次翻译在 {max_retries + 1} 次尝试后最终失败")
        return []
    
    
    def _clean_translation_result(self, text: str) -> str:
        """
        Clean a raw model response, removing formatting artifacts.
        For JSON mode this strips:
        1. Markdown code-fence markers (```json ... ```)
        2. <translate_input> tags
        3. Common explanatory prefixes
        """
        if not text:
            return text
        
        original_text = text
        
        # Strip Markdown code fences (```json ... ``` or ``` ... ```)
        text = re.sub(r'^```json\s*', '', text, flags=re.IGNORECASE)
        text = re.sub(r'^```\s*', '', text)
        text = re.sub(r'\s*```$', '', text)
        
        # Prefer the content inside a complete <translate_input> tag pair (case-insensitive)
        match = re.search(r'<translate_input>\s*(.*?)\s*</translate_input>', text, flags=re.IGNORECASE | re.DOTALL)
        if match:
            text = match.group(1).strip()
        else:
            # No complete pair: drop stray opening/closing tags individually
            text = re.sub(r'<translate_input>\s*', '', text, flags=re.IGNORECASE)
            text = re.sub(r'\s*</translate_input>', '', text, flags=re.IGNORECASE)
        
        # Drop common explanatory prefixes
        text = re.sub(r'^(Here is the translated JSON|Translated JSON|Translation):\s*', '', text, flags=re.IGNORECASE)
        
        # Final whitespace cleanup
        text = text.strip()
        
        # If cleaning removed everything, fall back to the original response
        if not text:
            logger.warning(f"清理后结果为空，返回原始文本")
            return original_text.strip()
        
        return text

class TranslationService:
    """翻译服务统一接口"""
    
    @staticmethod
    def is_mostly_chinese(text: str) -> bool:
        """
        判断文本是否主要为中文
        
        Args:
            text: 要检测的文本
            
        Returns:
            如果中文字符占比>=80%则返回True,否则返回False
        """
        if not text:
            return False
        # 统计中文字符数量
        chinese_count = sum(1 for c in text if '\u4e00' <= c <= '\u9fff')
        # 统计总字符数（排除空白字符）
        non_whitespace = [c for c in text if not c.isspace()]
        if len(non_whitespace) <= 3:
            return False
        
        # 改进规则：检查是否全部为中文字符（允许少量标点符号）
        # 只统计字母和中文字符，计算中文占比
        non_punctuation = [c for c in non_whitespace if c.isalnum() or '\u4e00' <= c <= '\u9fff']
        if len(non_punctuation) == 0:
            return False
        
        # 中文字符占比需要达到80%以上才认为是纯中文内容
        chinese_ratio = chinese_count / len(non_punctuation)
        return chinese_ratio >= 0.8
    
    def get_user_translator(self, user_id: int, db: Session) -> Optional[Tuple[Optional[BaseTranslator], Optional[TranslationConfig]]]:
        """
        Build the translator instance for a user's default translation config.

        Args:
            user_id: id of the user whose config should be resolved
            db: active SQLAlchemy session

        Returns:
            (translator, config): translator is None when no usable config
            exists or the config is incomplete; config is None only when no
            config record could be resolved at all.
        """
        try:
            from api.model.user import User

            # Resolve the user's default translation-config id
            user = db.query(User).filter(User.id == user_id).first()
            if user is None or not user.default_translation_config_id:
                logger.warning(f"用户 {user_id} 未设置默认翻译配置")
                return None, None

            # Load the referenced config (may be the user's own or a shared one)
            config = db.query(TranslationConfig).filter(
                TranslationConfig.id == user.default_translation_config_id
            ).first()
            if config is None:
                logger.warning(f"用户 {user_id} 的默认翻译配置 {user.default_translation_config_id} 不存在")
                return None, None

            provider = config.provider
            creds = config.credentials

            if provider == "baidu":
                app_id, secret_key = creds.get("appId"), creds.get("secretKey")
                if app_id and secret_key:
                    return BaiduTranslator(app_id, secret_key), config
                logger.error(f"用户 {user_id} 的百度翻译配置不完整")
                return None, config

            if provider == "deeplx":
                endpoint = creds.get("url")
                if endpoint:
                    # DeeplX's API key is optional and currently unused here
                    return DeeplXTranslator(endpoint), config
                logger.error(f"用户 {user_id} 的DeeplX翻译配置不完整")
                return None, config

            if provider == "ai":
                endpoint = creds.get("url")
                api_key = creds.get("key")
                model = creds.get("model")
                custom_prompt = creds.get("prompt")  # prompt is optional
                if endpoint and api_key and model:
                    return AITranslator(endpoint, api_key, model, custom_prompt), config
                logger.error(f"用户 {user_id} 的AI翻译配置不完整")
                return None, config

            logger.error(f"用户 {user_id} 配置了不支持的翻译服务: {provider}")
            return None, config

        except Exception as e:
            logger.error(f"为用户 {user_id} 获取翻译器时出错: {e}", exc_info=True)
            return None, None
    
    def translate_html(self, html_content: str, translator: BaseTranslator) -> Optional[str]:
        """
        使用lxml提取HTML文本并翻译（精确控制text、tail和attributes）
        [已修复] 1. 使用递归遍历保证视觉顺序
        [已修复] 2. 移除函数内的分批逻辑，交由Translator处理
        [已修复] 3. 增加对 'title', 'alt', 'placeholder' 属性的翻译
        [已修复] 4. 增加对HTML片段的正确处理
        """
        if not translator or not html_content or not html_content.strip():
            return html_content

        try:
            # 标记是否为HTML片段（用于最后序列化）
            is_fragment = not (html_content.strip().lower().startswith('<html') or html_content.strip().lower().startswith('<!doctype'))
            
            # 使用lxml解析HTML
            tree = lxml_html.fromstring(html_content)
            
            # 移除不需要翻译的标签
            for tag in tree.xpath('.//script | .//style | .//noscript'):
                tag.getparent().remove(tag)

            texts_to_translate = []
            text_locations = []  # 存储: (element, 'text'|'tail'|'attr_name', original_text)
            
            TRANSLATABLE_ATTRIBUTES = ['title', 'alt', 'placeholder']
            
            def should_translate(text):
                """判断文本是否需要翻译"""
                if not text or not text.strip():
                    return False
                
                stripped = text.strip()
                
                # 过滤全数字文本(允许少量标点符号)
                if re.match(r'^[\d\s\.,\-/:]+$', stripped):
                    return False
                
                # 过滤纯URL链接
                # 匹配常见的URL模式: http://、https://、www.、ftp://等
                url_pattern = r'^(https?://|ftp://|www\.)[^\s]+$'
                if re.match(url_pattern, stripped, re.IGNORECASE):
                    return False
                
                # 如果文本以URL开头，但后面还有内容，提取非URL部分
                # 例如: "https://example.com Check this out" -> "Check this out"
                url_prefix_pattern = r'^(https?://|ftp://|www\.)[^\s]+'
                url_match = re.match(url_prefix_pattern, stripped, re.IGNORECASE)
                if url_match:
                    # 移除URL部分，检查剩余内容
                    remaining_text = stripped[url_match.end():].strip()
                    # 检查剩余内容是否有实际文字（不仅仅是标点符号）
                    has_text_content = any(not c.isspace() and c not in '.,!?;:\'"()[]{}' for c in remaining_text)
                    if not has_text_content:
                        # 没有实质内容，不翻译
                        return False
                
                # 过滤纯标点符号或空白（更国际化的判断）
                # 检查是否有非空白、非标点的字符
                has_meaningful_content = any(
                    not c.isspace() and c not in '.,!?;:\'"()[]{}@#$%^&*-_=+|\\/<>`~'
                    for c in stripped
                )
                if not has_meaningful_content:
                    return False
                
                # 过滤CSS/JS
                if '{' in text and '}' in text:
                    return False
                if any(kw in text.lower() for kw in ['function', 'var ', 'let ', 'const ', 'return ']):
                    return False
                # 过滤MSO条件注释
                if '[if' in text or '<![endif]' in text or 'mso' in text.lower():
                    return False
                # 过滤VML
                if '<v:' in text or '</v:' in text or 'xmlns:v=' in text:
                    return False
                # 过滤已经是中文的文本
                if self.is_mostly_chinese(text):
                    return False
                return True

            def collect_texts_recursive(element):
                """
                【关键修复】按正确的视觉顺序递归收集文本
                顺序: 1. 属性, 2. .text, 3. 子元素 (递归), 4. .tail
                """
                
                # 0. 跳过我们不关心的节点
                if isinstance(element, lxml_html.HtmlComment):
                    return
                if element.tag in ['script', 'style', 'noscript']:
                    return

                # 1. 收集属性 (修复点 3)
                for attr_name in TRANSLATABLE_ATTRIBUTES:
                    if attr_name in element.attrib:
                        attr_value = element.get(attr_name)
                        if attr_value and should_translate(attr_value):
                            texts_to_translate.append(attr_value.strip())
                            text_locations.append((element, attr_name, attr_value))

                # 2. 收集 .text
                if element.text and should_translate(element.text):
                    texts_to_translate.append(element.text.strip())
                    text_locations.append((element, 'text', element.text))
                
                # 3. 递归处理所有子元素
                for child in element:
                    collect_texts_recursive(child)
                
                # 4. 最后处理 .tail
                if element.tail and should_translate(element.tail):
                    texts_to_translate.append(element.tail.strip())
                    text_locations.append((element, 'tail', element.tail))

            # --- 从根节点开始收集 (修复点 1) ---
            collect_texts_recursive(tree)
            
            if not texts_to_translate:
                logger.warning("在HTML中没有找到可翻译的文本")
                return html_content
            
            # --- 批量翻译 (修复点 2) ---
            # 移除原有的 "语义合并" 和 "创建翻译结果映射" 逻辑
            # 直接将所有片段交给翻译器处理，由翻译器决定如何分批
            translated_texts = translator.translate_batch(texts_to_translate)
            
            # --- 原样替换 (修复点 2 和 3) ---
            success_count = 0
            if not translated_texts or len(translated_texts) != len(texts_to_translate):
                logger.error(f"翻译返回的结果数量与请求数量不匹配！期望 {len(texts_to_translate)}, 得到 {len(translated_texts) if translated_texts else 0}")
                return None  # 返回None表示翻译失败，避免缓存部分结果

            for idx, (element, location_type, original_text) in enumerate(text_locations):
                translated_text = translated_texts[idx]
                
                if translated_text:
                    # 替换到原位置
                    if location_type == 'text' or location_type == 'tail':
                        # 保留原始文本的前后空白字符
                        leading_space = len(original_text) - len(original_text.lstrip())
                        trailing_space = len(original_text) - len(original_text.rstrip())
                        
                        final_text = original_text[:leading_space] + translated_text + original_text[-trailing_space:] if trailing_space > 0 else original_text[:leading_space] + translated_text
                        
                        if location_type == 'text':
                            element.text = final_text
                        else:
                            element.tail = final_text
                    
                    else:
                        # location_type 是属性名 (e.g., 'title', 'alt')
                        # 属性值通常不需要保留首尾空白
                        element.set(location_type, translated_text.strip())
                    
                    success_count += 1

            if success_count == 0:
                logger.error("HTML翻译失败：没有成功翻译任何文本片段。")
                return None
            
            # --- 序列化回HTML字符串 (修复点 4) ---
            result_html = ""
            
            # 检查是否是片段，如果是，则需要"解包"
            if is_fragment and (tree.tag == 'html' or tree.tag == 'body'):
                # 找到body，lxml.fromstring 可能会或可能不会添加 <html>
                body_tag = tree.find('body')
                if body_tag is None and tree.tag == 'body':
                     body_tag = tree
                
                if body_tag is not None:
                    # 返回body内部的所有内容 (包括文本和子标签)
                    inner_html = (body_tag.text or '') + \
                                 ''.join(lxml_html.tostring(child, encoding='unicode', method='html')
                                         for child in body_tag)
                    result_html = inner_html
                else:
                    # 回退到标准tostring
                    result_html = lxml_html.tostring(tree, encoding='unicode', method='html')
            else:
                 # 默认情况：返回完整的序列化树
                result_html = lxml_html.tostring(tree, encoding='unicode', method='html')
            
            return result_html
            
        except Exception as e:
            logger.error(f"翻译HTML失败: {e}", exc_info=True)
            return None
    
    def translate_email_content(self, user_id: int, db: Session, email_id: int, html_content: str, subject: str = None, eml_path: str = None, force_translate: bool = False) -> Optional[Dict[str, str]]:
        """
        翻译邮件内容和标题（基于文件存储和用户配置）
        
        Args:
            user_id: 用户ID
            db: 数据库会话
            email_id: 邮件ID
            html_content: 邮件HTML内容
            subject: 邮件标题
            eml_path: 邮件EML文件路径（用于确定翻译文件存储位置）
            force_translate: 是否跳过缓存强制重新翻译
        
        Returns:
            包含翻译后的HTML内容和标题的字典: {"content": str, "title": str}
        """
        # 1. 获取该用户的翻译器实例和配置
        translator, config = self.get_user_translator(user_id, db)
        if not translator or not config:
            logger.warning(f"用户 {user_id} 无法获取翻译器或配置，翻译任务中止")
            return "NO_DEFAULT_CONFIG"

        config_id = config.id

        # 2. 如果提供了eml_path且不是强制翻译，检查是否已有翻译文件
        if not force_translate and eml_path:
            import os
            # 新的缓存文件路径
            translated_file_path = eml_path.rsplit('.', 1)[0] + f'_zh_{config_id}.html'
            
            if os.path.exists(translated_file_path):
                try:
                    with open(translated_file_path, 'r', encoding='utf-8') as f:
                        file_content = f.read()
                        logger.info(f"从文件缓存读取翻译内容 - email_id: {email_id}, path: {translated_file_path}")
                        
                        # 从缓存文件中提取标题和内容
                        import re
                        title_match = re.search(r'<!-- TRANSLATED_TITLE: (.*?) -->', file_content)
                        if title_match:
                            translated_title = title_match.group(1)
                            # 移除标题注释，获取纯内容
                            translated_content = re.sub(r'<!-- TRANSLATED_TITLE: .*? -->\n?', '', file_content, count=1)
                            return {
                                "content": translated_content,
                                "title": translated_title
                            }
                        else:
                            # 旧格式缓存，没有标题
                            return {
                                "content": file_content,
                                "title": subject if subject else ""
                            }
                except Exception as e:
                    logger.error(f"读取翻译文件失败 - path: {translated_file_path}, error: {e}")
                    # 读取失败，继续重新翻译
        
        # 如果是强制翻译，记录日志
        if force_translate:
            logger.info(f"跳过缓存，强制重新翻译 - email_id: {email_id}, user_id: {user_id}")
        
        # 3. 执行翻译 - 将标题和内容一起翻译
        translated_title = None
        if subject and subject.strip():
            # 使用类方法判断标题是否需要翻译（如果已经是中文则不翻译）
            if not self.is_mostly_chinese(subject):
                # 翻译标题
                title_results = translator.translate_batch([subject.strip()])
                if title_results and title_results[0]:
                    translated_title = title_results[0]
                    logger.info(f"标题翻译成功 - email_id: {email_id}")
                else:
                    logger.warning(f"标题翻译失败，使用原标题 - email_id: {email_id}")
                    translated_title = subject
            else:
                # 标题已经是中文，不需要翻译
                translated_title = subject
        
        # 翻译内容
        translated_content = self.translate_html(html_content, translator)
        
        # 4. 检查翻译是否成功
        if not translated_content:
            logger.error(f"内容翻译失败 - email_id: {email_id}, 不保存翻译文件")
            return None
        
        # 5. 如果翻译成功且提供了eml_path，保存翻译结果到文件
        if eml_path:
            import os
            translated_file_path = eml_path.rsplit('.', 1)[0] + f'_zh_{config_id}.html'
            
            try:
                # 确保目录存在
                os.makedirs(os.path.dirname(translated_file_path), exist_ok=True)
                
                # 将标题和内容一起保存到文件（标题作为HTML注释保存在文件开头）
                file_content = ''
                if translated_title:
                    file_content = f'<!-- TRANSLATED_TITLE: {translated_title} -->\n'
                file_content += translated_content
                
                # 写入翻译文件
                with open(translated_file_path, 'w', encoding='utf-8') as f:
                    f.write(file_content)
                logger.info(f"翻译内容和标题已保存到文件 - email_id: {email_id}, path: {translated_file_path}")
            except Exception as e:
                logger.error(f"保存翻译文件失败 - path: {translated_file_path}, error: {e}")
                # 保存失败不影响返回翻译结果
        
        # 6. 返回翻译结果
        return {
            "content": translated_content,
            "title": translated_title if translated_title else subject
        }

    def delete_translation_cache_for_config(self, config_id: int, db: Session):
        """
        Delete every cached translation file produced under a given
        translation config.

        Removes both the current per-config cache files
        (``<base>_zh_<config_id>.html``) and, for backward compatibility,
        the legacy un-suffixed ones (``<base>_zh.html``).

        Args:
            config_id: translation config ID whose cache files are removed.
            db: database session used to enumerate email EML paths.
        """
        # Imported locally — presumably to avoid a circular import; confirm.
        from api.model.email import Email

        try:
            # Every email with an EML path may have cache files next to it.
            emails = db.query(Email.eml_path).filter(Email.eml_path.isnot(None)).all()
            if not emails:
                return

            deleted_count = 0
            # (suffix, log label) pairs: legacy format first, then the
            # per-config format. The label '旧的' marks the legacy file in logs.
            suffix_variants = (
                ('_zh.html', '旧的'),
                (f'_zh_{config_id}.html', ''),
            )

            for (eml_path,) in emails:
                if not eml_path:
                    continue

                base_path = eml_path.rsplit('.', 1)[0]

                for suffix, label in suffix_variants:
                    cache_path = base_path + suffix
                    if not os.path.exists(cache_path):
                        continue
                    try:
                        os.remove(cache_path)
                        deleted_count += 1
                        logger.info(f"成功删除{label}翻译缓存文件: {cache_path}")
                    except Exception as e:
                        logger.error(f"删除{label}翻译缓存文件失败: {cache_path}, error: {e}")

            if deleted_count > 0:
                logger.info(f"共删除了 {deleted_count} 个与配置ID {config_id} 相关的翻译缓存文件")

        except Exception as e:
            logger.error(f"删除配置ID {config_id} 的翻译缓存时出错: {e}", exc_info=True)

    def delete_translation_cache_for_email(self, eml_path: str):
        """
        Delete all translation cache files belonging to a single email.

        Matches both per-config caches (``<base>_zh_<config_id>.html``, or any
        ``_zh_*`` suffix) and the legacy cache (``<base>_zh.html``).

        Args:
            eml_path: path of the email's EML file; no-op when falsy.
        """
        import glob

        if not eml_path:
            return

        try:
            base_path = eml_path.rsplit('.', 1)[0]

            # glob.escape guards against '[', '?' and '*' in the path itself,
            # which glob would otherwise treat as wildcard metacharacters and
            # silently fail to match the real cache files.
            search_pattern = f"{glob.escape(base_path)}_zh_*.html"
            legacy_file = f"{base_path}_zh.html"

            files_to_delete = glob.glob(search_pattern)
            if os.path.exists(legacy_file):
                files_to_delete.append(legacy_file)

            deleted_count = 0
            for file_path in files_to_delete:
                try:
                    os.remove(file_path)
                    deleted_count += 1
                    logger.info(f"成功删除邮件翻译缓存: {file_path}")
                except Exception as e:
                    logger.error(f"删除邮件翻译缓存失败: {file_path}, error: {e}")

            if deleted_count > 0:
                logger.info(f"共为邮件 {eml_path} 删除了 {deleted_count} 个翻译缓存")

        except Exception as e:
            logger.error(f"为邮件 {eml_path} 删除翻译缓存时出错: {e}", exc_info=True)



# Module-level singleton: the shared translation service instance used
# throughout the application.
translation_service = TranslationService()