# backend/utils.py
"""
Translation utility module.

Provides Baidu Translate API calls, placeholder-based protection of text
that must not be translated (code spans, quotes, identifiers, terms), and
post-translation cleanup of the Chinese output.
"""
import hashlib
import os
import random
import re
from typing import List, Dict, Tuple

import requests

# Baidu Translate API configuration.
# NOTE(security): credentials were previously hard-coded; they are now read
# from the environment when available, with the original literals kept as
# backward-compatible fallbacks. Prefer setting the env vars in deployment.
BAIDU_TRANSLATE_APPID = os.environ.get('BAIDU_TRANSLATE_APPID', '20251013002474387')
BAIDU_TRANSLATE_APPKEY = os.environ.get('BAIDU_TRANSLATE_APPKEY', 'sbdaZJn0eKEdwloyrhHe')
BAIDU_TRANSLATE_URL = 'http://api.fanyi.baidu.com/api/trans/vip/translate'

# =========================
#  Placeholder tokens (wrapped in Unicode Private-Use-Area characters,
#  which machine translators almost never rewrite)
# =========================
TOKEN_PREFIX = "\uE000"  # PUA character; typically renders as blank
TOKEN_SUFFIX = "\uE001"  # PUA character; typically renders as blank
# Canonical shape between the PUA markers: 12:7b1c
# (12 = token index, 7b1c = first 4 hex digits of the segment's MD5)
STD_TOKEN_RE = re.compile(r"\uE000(\d+):([0-9a-fA-F]{4})\uE001")

# Compatibility: old angle-bracket tokens, including translator-mangled
# variants such as <<3:7b1c> or <<<3:7b1c>>>>.
ANGLE_TOKEN_LOOSE_RE = re.compile(r"<{1,6}\s*(\d+)\s*:\s*([0-9a-fA-F]{4})\s*>{1,6}")

# Compatibility: legacy [[PRESERVE_3]] format (tolerates typos/whitespace).
LEGACY_TOKEN_RE = re.compile(r"\[\[\s*PRE\S*?\s*[_-]?\s*(\d+)\s*\]\]", re.IGNORECASE)

def _md5_4(s: str) -> str:
    return hashlib.md5(s.encode("utf-8")).hexdigest()[:4]

def _make_token(idx: int, raw: str) -> str:
    """Build a placeholder token: PUA prefix, 'idx:hash4', PUA suffix."""
    body = str(idx) + ":" + _md5_4(raw)
    return TOKEN_PREFIX + body + TOKEN_SUFFIX

def _normalize_angle_tokens_to_pua(text: str) -> str:
    """Rewrite mangled angle-bracket tokens (<<3:7b1c>, <<<3:7b1c>>>, ...)
    into the canonical PUA form: PREFIX + idx:hash + SUFFIX."""
    def to_pua(match: re.Match) -> str:
        index = int(match.group(1))
        digest = match.group(2).lower()
        return "%s%d:%s%s" % (TOKEN_PREFIX, index, digest, TOKEN_SUFFIX)
    return ANGLE_TOKEN_LOOSE_RE.sub(to_pua, text)

def _protect_text_segments(text: str) -> Tuple[str, Dict[str, str]]:
    """
    Replace segments that must not be translated with placeholder tokens.

    Returns (text with placeholders substituted, mapping placeholder -> original).
    Protected spans:
    - fenced Markdown code blocks ```...```
    - inline code `...`
    - single-quoted text, both curly (U+2018/U+2019) and straight '...'
    - double-quoted text, both curly (U+201C/U+201D) and straight "..."
    - common identifiers (call targets, dotted names) and a term whitelist
    """
    if not text:
        return text, {}

    patterns = [
        re.compile(r"```[\s\S]*?```", re.MULTILINE),  # fenced code block
        re.compile(r"`[^`\n]+`"),                     # inline code
        # Markdown headers are deliberately NOT protected so they get translated.
        # Bug fix: the curly-quote patterns had been mojibake'd into exact
        # duplicates of the straight-quote ones; restore the Unicode forms.
        re.compile("\u2018[^\u2019\n]+\u2019"),       # curly single quotes
        re.compile(r"'[^'\n]+'"),                     # straight single quotes
        re.compile("\u201c[^\u201d\n]+\u201d"),       # curly double quotes
        re.compile(r'"[^"\n]+"'),                     # straight double quotes
    ]

    # Also protect function names / identifiers / whitelisted terms.
    identifier_patterns = [
        re.compile(r"\b[A-Za-z_]\w*(?=[ \t]*\()"),           # foo(
        re.compile(r"\b[A-Za-z_]\w*(?:\.[A-Za-z_]\w+)+\b"),  # msg.sender
    ]
    term_whitelist = [
        "Ether", "Wei", "ETH",
        "Reentrancy", "reentrancy", "ReentrancyGuard", "nonReentrant",
        "Ownable", "onlyOwner",
        "Solidity",
        "SWC", "SWC-107", "SWC-112",
    ]
    if term_whitelist:
        # Longest-first so the alternation prefers e.g. "SWC-107" over "SWC".
        term_whitelist_sorted = sorted(term_whitelist, key=len, reverse=True)
        term_regex = r"\b(?:%s)\b" % "|".join(re.escape(t) for t in term_whitelist_sorted)
        identifier_patterns.append(re.compile(term_regex))

    # Collect every matched span.
    matches = []
    for pat in patterns:
        matches.extend(pat.finditer(text))
    for pat in identifier_patterns:
        matches.extend(pat.finditer(text))

    # Sort and drop overlaps. Bug fix: at equal start positions keep the
    # LONGEST match (previously the shortest won, so e.g. a long protected
    # span could be shadowed by a shorter match starting at the same offset).
    matches.sort(key=lambda m: (m.start(), -m.end()))
    filtered = []
    last_end = -1
    for m in matches:
        if m.start() >= last_end:
            filtered.append(m)
            last_end = m.end()

    # Substitute each protected span with a token, left to right.
    result_chunks: List[str] = []
    cursor = 0
    token_map: Dict[str, str] = {}
    token_index = 0
    for m in filtered:
        if cursor < m.start():
            result_chunks.append(text[cursor:m.start()])
        raw = text[m.start():m.end()]
        token = _make_token(token_index, raw)
        token_map[token] = raw
        result_chunks.append(token)
        cursor = m.end()
        token_index += 1
    if cursor < len(text):
        result_chunks.append(text[cursor:])

    return "".join(result_chunks), token_map

def _restore_text_segments(text: str, token_map: Dict[str, str]) -> str:
    """Put original segments back in place of placeholders: strict pass,
    angle-bracket-variant normalization, then legacy-format fallback."""
    if not text or not token_map:
        return text

    # Step 0: normalize mangled angle-bracket variants (<<< / << / >>>)
    # into canonical PUA tokens before any replacement.
    text = _normalize_angle_tokens_to_pua(text)

    # Step 1: strict replacement of canonical PUA tokens.
    for placeholder, original in token_map.items():
        text = text.replace(placeholder, original)

    # Step 2: fallback scan for any remaining canonical-shaped tokens.
    lookup = dict(token_map)

    def restore_pua(match: re.Match) -> str:
        candidate = "%s%d:%s%s" % (
            TOKEN_PREFIX, int(match.group(1)), match.group(2).lower(), TOKEN_SUFFIX
        )
        return lookup.get(candidate, match.group(0))

    text = STD_TOKEN_RE.sub(restore_pua, text)

    # Step 3: legacy [[PRESERVE_3]] tokens — best-effort restore by index only.
    index_to_token: Dict[int, str] = {}
    for placeholder in lookup:
        parsed = STD_TOKEN_RE.match(placeholder)
        if parsed is not None:
            index_to_token.setdefault(int(parsed.group(1)), placeholder)

    def restore_legacy(match: re.Match) -> str:
        placeholder = index_to_token.get(int(match.group(1)))
        if placeholder is None:
            return match.group(0)
        return lookup.get(placeholder, match.group(0))

    return LEGACY_TOKEN_RE.sub(restore_legacy, text)

def _safe_chunks(text: str, max_len: int) -> List[str]:
    """
    Split *text* into chunks of roughly at most *max_len* characters without
    cutting a PUA placeholder token in half, preferring to break at
    whitespace or punctuation when a nice break point is close enough.
    """
    chunks = []
    i = 0
    L = len(text)
    while i < L:
        j = min(i + max_len, L)
        # If the cut would land inside an unclosed token (a prefix occurs
        # after the last suffix in this window), back the cut off to the
        # token's start...
        left = text.rfind(TOKEN_PREFIX, i, j)
        right = text.rfind(TOKEN_SUFFIX, i, j)
        if left != -1 and (right == -1 or right < left):
            j = left
            if j <= i:
                # ...unless the token starts the chunk; then extend the chunk
                # past the token's closing suffix (or to the end of the text
                # if the token never closes).
                end_token = text.find(TOKEN_SUFFIX, left)
                if end_token != -1:
                    j = end_token + len(TOKEN_SUFFIX)
                else:
                    j = L
        # Walk left from j looking for whitespace or punctuation to break on.
        k = j
        while k > i and not text[k-1].isspace() and text[k-1] not in "，。；,.!?;:)]}":
            k -= 1
        # Only take the nicer break point if it doesn't shrink the chunk to
        # fewer than ~20 characters.
        if k > i + 20:
            j = k
        chunks.append(text[i:j])
        i = j
    return chunks

def _normalize_markdown(s: str) -> str:
    """轻度 Markdown 规范化：补列表项空格，不改语义。"""
    return re.sub(r'(?m)^( *)(-)(\S)', r'\1- \3', s)

# =========================
#   Post-translation processing (term unification + heading localization
#   + layout fixes + result-line normalization)
# =========================
# English term -> preferred Chinese rendering; applied outside backticks.
GLOSSARY = [
    (r'\bVulnerability Identification\b', '漏洞识别'),
    (r'\bImpact Assessment\b', '影响评估'),
    (r'\bSecurity Recommendations\b', '安全建议'),
    (r'\bDescription\b', '描述'),
    (r'\bLocation\b', '位置'),
    (r'\bReentrancy\b', '重入'),
    (r'\breentrancy\b', '重入')
]

# Phrasing fix-ups applied after the glossary pass (also outside backticks).
# NOTE(review): rules whose pattern contains a backtick cannot match when
# applied through _replace_outside_backticks as currently written, because
# the split removes every complete `...` span from the searched text —
# verify these rules ever fire.
REPHRASE = [
    (r'合同', '合约'),
    (r'地点', '位置'),
    (r'说明', '描述'),
    (r'重入\s*攻击', '重入攻击'),
    (r'重复传输\s*Ether', '重复转移 Ether'),
    (r'拨打电话\s*`withdraw`', '调用 `withdraw`'),
    (r'呼叫者', '调用方'),
    (r'The\s*`withdraw`', '`withdraw`'),  # drop the redundant "The "
    # EN->CN phrasing: uses `call` to transfer Ether -> 使用 `call` 将 Ether 转给调用方
    (r'uses\s*`call`\s*to\s*transfer\s*Ether(?:\s*to\s*the\s*caller)?', '使用 `call` 将 Ether 转给调用方'),
    # function cannot -> 函数不可
    (r'\bfunction\s+cannot\b', '函数不可'),
    # "with `transfer` or `send` to send Ether" -> "为 `transfer`（或 `send`）"
    (r'with\s*`transfer`\s*or\s*`send`\s*to\s*send\s*Ether', '为 `transfer`（或 `send`）'),
    # Trailing ASCII periods are unified to Chinese full stops elsewhere
    # (code blocks untouched).
]

def _replace_outside_backticks(s: str, pat: str, rep: str) -> str:
    """仅替换反引号外部文本，保留代码片段原样。"""
    parts = re.split(r'(`[^`]*`)', s)  # 偶数索引：普通文本；奇数索引：反引号内容
    for i in range(0, len(parts), 2):
        parts[i] = re.sub(pat, rep, parts[i], flags=re.IGNORECASE)
    return ''.join(parts)

def _standardize_headings(text: str) -> str:
    """Localize English section headings to Chinese (keeps the '##' prefix)."""
    # Only glossary entries that are actual section names are applied here.
    section_patterns = {
        r'\bVulnerability Identification\b',
        r'\bImpact Assessment\b',
        r'\bSecurity Recommendations\b',
        r'\bDescription\b',
        r'\bLocation\b',
    }
    for pattern, replacement in GLOSSARY:
        if pattern in section_patterns:
            text = re.sub(rf'(?m)^(##\s*){pattern}\s*$', rf'\1{replacement}', text)
    return text

def _tidy_bullets(text: str) -> str:
    """统一列表项与缩进、标点、反引号粘连空格。"""
    # 反引号粘连修复：X`foo` → X `foo`; `foo`Y → `foo` Y
    text = re.sub(r'([^\s`])(`[^`]+`)', r'\1 \2', text)
    text = re.sub(r'(`[^`]+`)([^\s`])', r'\1 \2', text)
    # 列表项：- 后补空格
    text = re.sub(r'(?m)^( *)-(\S)', r'\1- \2', text)
    # "位置/描述"下的子项：用二级缩进
    text = re.sub(r'(?m)^- 位置：\s*\n\s*-\s*', r'- 位置：\n  - ', text)
    text = re.sub(r'(?m)^- 描述：\s*\n\s*-\s*', r'- 描述：\n  - ', text)
    # 句末英文句号统一为中文句号（不动代码块）
    lines = text.splitlines()
    for i, ln in enumerate(lines):
        if ln.strip().startswith("```"):
            # 跳过代码块内部
            j = i + 1
            while j < len(lines) and not lines[j].strip().startswith("```"):
                j += 1
            i = j
            continue
        lines[i] = re.sub(r'([^\n。])\.\s*$', r'\1。', ln)
    return "\n".join(lines)

def _normalize_result_line(text: str) -> str:
    """
    规范化/保留最后的 Result 行：
    - 若文本中已出现 Result: 0/1，则抽取"最后一次出现"的值，并在结尾单独输出一行"Result: X"
    - 若未出现，不添加
    """
    m_all = list(re.finditer(r'(?i)\bResult\s*:\s*([01])\b', text))
    if not m_all:
        return text
    val = m_all[-1].group(1)
    # 去掉文中其它 Result 提及，仅保留末尾一行
    text_wo = re.sub(r'(?i)\bResult\s*:\s*[01]\b', '', text)
    text_wo = text_wo.rstrip() + "\n\nResult: " + val
    return text_wo

def postprocess_cn(text: str) -> str:
    """Chinese post-processing: heading localization, term unification,
    phrasing fixes, layout repair, result-line normalization."""
    # 1) Localize section headings.
    text = _standardize_headings(text)

    # 2) Unify terminology and phrasing (substitutes outside backticks only).
    for pat, rep in GLOSSARY + REPHRASE:
        text = _replace_outside_backticks(text, pat, rep)

    # 3) Targeted rewrites for known source sentences.
    # "The `withdraw` function uses `call` to transfer Ether to the caller, which is vulnerable to reentrancy attacks."
    text = _replace_outside_backticks(
        text,
        r'The\s*`withdraw`\s*function\s*uses\s*`call`\s*to\s*transfer\s*Ether\s*to\s*the\s*caller.*reentrancy\s*attacks?',
        r'`withdraw` 函数使用 `call` 将 Ether 转给调用方，这一做法容易被重入攻击利用。'
    )
    # "Replace `call` with `transfer` to ensure the function cannot be reentered"
    text = _replace_outside_backticks(
        text,
        r'Replace\s*`call`\s*with\s*`transfer`(?:\s*or\s*`send`)?\s*to\s*ensure\s*the\s*function\s*cannot\s*be\s*reentered',
        r'将 `call` 替换为 `transfer`（或 `send`），并遵循 Checks-Effects-Interactions 模式，确保函数不可被重入'
    )

    # 4) Lists, punctuation, spacing around inline code, nesting indent.
    text = _tidy_bullets(text)

    # 5) Result-line normalization (single trailing "Result: X") — currently
    # disabled; re-enable by uncommenting the line below.
    # text = _normalize_result_line(text)

    return text

# =========================
#   Main translation pipeline
# =========================
def translate_text(text: str, from_lang: str = 'en', to_lang: str = 'zh') -> str:
    """
    Translate text (default English -> Chinese) via the Baidu Translate API.

    Long inputs are split and translated in segments, since the API caps a
    single request at 5000 characters.
    """
    if not text or not text.strip():
        return text

    MAX_CHARS = 4500  # stay safely under the 5000-character API limit

    # Shield code spans / quoted text / identifiers behind placeholder tokens.
    protected_text, token_map = _protect_text_segments(text)

    if len(protected_text) <= MAX_CHARS:
        # A single request is enough.
        translated = _translate_single(protected_text, from_lang, to_lang)
    else:
        # Token-safe chunking, one request per chunk.
        print(f"📝 [翻译] 文本较长({len(text)}字符)，分段翻译...")
        translated = "".join(
            _translate_single(chunk, from_lang, to_lang)
            for chunk in _safe_chunks(protected_text, MAX_CHARS)
        )

    restored = _restore_text_segments(translated, token_map)
    restored = _normalize_markdown(restored)
    return postprocess_cn(restored)

def _translate_single(text: str, from_lang: str = 'en', to_lang: str = 'zh') -> str:
    """
    Translate a single segment via the Baidu Translate HTTP API.

    Uses a form-encoded POST (data=payload) so that newlines survive as far
    as the API allows. On any failure — non-200 status, API error code,
    unexpected payload shape, or exception — the ORIGINAL text is returned
    unchanged: callers get best-effort translation, never an exception.
    """
    print(f"翻译单段文本: {text[:120]}{'...' if len(text) > 120 else ''}")
    try:
        if not isinstance(text, str):
            text = str(text)

        # Request signature per the Baidu API: md5(appid + query + salt + key).
        salt = random.randint(32768, 65536)
        sign_str = BAIDU_TRANSLATE_APPID + text + str(salt) + BAIDU_TRANSLATE_APPKEY
        sign = hashlib.md5(sign_str.encode("utf-8")).hexdigest()

        payload = {
            'appid': BAIDU_TRANSLATE_APPID,
            'q': text,
            'from': from_lang,
            'to': to_lang,
            'salt': salt,
            'sign': sign
        }
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}

        resp = requests.post(BAIDU_TRANSLATE_URL, data=payload, headers=headers, timeout=30)
        if resp.status_code != 200:
            print(f"⚠️  [翻译警告] HTTP状态码: {resp.status_code}")
            print(f"响应内容: {resp.text[:200]}")
            return text

        result = resp.json()
        if 'error_code' in result:
            print(f"⚠️  [翻译警告] 翻译API错误: {result.get('error_code')} - {result.get('error_msg')}")
            return text

        if 'trans_result' in result and result['trans_result']:
            # The API returns one entry per input line; rejoin with newlines,
            # falling back to the source line when 'dst' is missing.
            parts = [item.get('dst', item.get('src', '')) for item in result['trans_result']]
            return "\n".join(parts)
        else:
            print(f"⚠️  [翻译警告] 翻译API返回异常: {result}")
            return text

    except Exception as e:  # broad by design: translation is best-effort
        print(f"⚠️  [翻译错误] {e}")
        return text

