#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author          : ricsy
@File            : sphinx_article_meta.py
@Create Time     : 2025/6/6 13:50
@Last Modified   :
@Description     : 文章阅读时长插件
    多维度分析精确估算文章阅读时长，包含：内容复杂度分析、语言自适应处理等
"""
import math
import os
import re
from fnmatch import translate
from pathlib import PurePosixPath, PureWindowsPath
from typing import Dict

import jieba
from docutils import nodes
from docutils.nodes import title
from readtime import of_text

from sphinx.application import Sphinx


# 默认页面元素配置（包含数据量以及单个阅读时间）
element_stats = {
    "table": {"count": 0, "time": 15},
    "code-blocks": {
        "short": {"count": 0, "time": 5},
        "medium": {"count": 0, "time": 30},
        "long": {"count": 0, "time": 60},
    },
    "image": {"count": 0, "time": 5},
}
line_threshold = {"short": 10, "medium": 30}  # 行数阈值


def normalize_path(path: str) -> str:
    """Normalize a path to POSIX form for cross-platform comparison.

    Converts Windows back-slashes to forward slashes, collapses mixed or
    duplicate separators, strips URL anchors/query strings, and drops any
    trailing slash.

    :param path: raw document path (Windows, POSIX, or mixed separators)
    :return: cleaned POSIX-style path
    """
    if "\\" in path:
        try:
            # Prefer proper Windows path parsing (handles drive letters etc.)
            path = PureWindowsPath(path).as_posix()
        except ValueError:
            # Fall back to a plain separator swap for malformed input
            path = path.replace("\\", "/")

    # Collapse mixed/duplicate separators into canonical POSIX form.
    # (After the block above the path can no longer contain back-slashes,
    # so the old unicode-escape re-encoding step was dead code and is gone.)
    path = PurePosixPath(path).as_posix()

    # Remove URL anchors (#...) and query strings (?...)
    normalized = re.sub(r"[#?].*$", "", path)

    # Remove a trailing slash, if any survived
    return normalized.rstrip("/")


def should_skip_doc(docname: str, exclude_list: list) -> bool:
    """Return True when *docname* matches any exclusion rule.

    Entries in *exclude_list* may be pre-compiled regex patterns or
    shell-style wildcards (e.g. ``*.rst``); wildcards are translated to
    regular expressions before matching.
    """
    # Anchor links are never real documents — skip immediately.
    if "#" in docname:
        return True

    for entry in exclude_list:
        # Accept ready-made regexes as-is; translate glob patterns
        # (e.g. *.rst) into anchored regular expressions.
        if isinstance(entry, re.Pattern):
            rule = entry
        else:
            rule = re.compile(translate(entry))
        if rule.fullmatch(docname):
            return True

    return False


def extract_table_text(table_node):
    """Collect a table's textual content, skipping its caption/title node."""
    # Gather text from every direct child except the caption (title) node.
    parts = [
        child.astext()
        for child in table_node.children
        if not isinstance(child, title)
    ]
    return " ".join(parts)


def extract_code_block(node):
    """Classify a literal (code) block as short/medium/long by line count.

    Increments the matching bucket's counter in the module-level
    ``element_stats`` so the extra reading time can be added later.
    """
    lines = node.astext().count("\n") + 1  # number of code lines
    buckets = element_stats["code-blocks"]
    short_max = line_threshold.get("short", 10)
    medium_max = line_threshold.get("medium", 30)

    if lines <= short_max:
        buckets["short"]["count"] += 1
    elif lines <= medium_max:  # already know lines > short_max here
        buckets["medium"]["count"] += 1
    else:
        buckets["long"]["count"] += 1


def get_raw_content(doctree: nodes.document) -> str:
    """Extract the document's readable text, tallying tables/images/code.

    Walks the doctree once: titles, table cells, paragraphs and list items
    contribute text; tables, images and literal blocks only bump their
    counters in the module-level ``element_stats``.

    :param doctree: resolved docutils document tree
    :return: all readable text joined with single spaces
    """
    content = []
    for node in doctree.findall():
        # 1. Skip container nodes; their children are visited separately.
        if isinstance(node, (nodes.section, nodes.document)):
            continue

        # 2. Section titles count as readable text.
        if isinstance(node, nodes.title):
            title_text = node.astext().strip()
            if title_text:
                content.append(title_text)
            continue

        # 3. Tables: count them and keep their cell text.
        if isinstance(node, nodes.table):
            element_stats["table"]["count"] += 1
            content.append(extract_table_text(node))
            continue

        # 4. Images: counted only, no text contribution.
        if isinstance(node, nodes.image):
            element_stats["image"]["count"] += 1
            continue

        # 5. Code blocks: classified by length, text excluded from word count.
        if isinstance(node, nodes.literal_block):
            extract_code_block(node)
            continue

        # 6. Ordinary content nodes.
        if isinstance(node, (nodes.paragraph, nodes.list_item)):
            text = node.astext().strip().replace("\n", " ")
            if text and text not in ["-"]:  # ignore empty / placeholder text
                content.append(text)
    return " ".join(content)


def calculate_readtime(text: str, lang: str = "en") -> int:
    """Estimate reading time in whole minutes for *text*.

    Combines the base reading speed (300 wpm for Chinese, 200 wpm
    otherwise) with the per-element extra seconds accumulated in the
    module-level ``element_stats`` (tables, images, code blocks).

    :param text: plain article text
    :param lang: Sphinx language code, e.g. ``"zh_CN"`` or ``"en"``
    :return: estimated minutes, rounded up
    """
    wpm = 300 if lang.startswith("zh") else 200  # language-dependent reading speed
    minutes = of_text(text, wpm=wpm).minutes

    extra_seconds = 0
    for key, value in element_stats.items():
        if key == "code-blocks":
            # Nested short/medium/long buckets; a zero count adds nothing.
            for info in value.values():
                extra_seconds += info["time"] * info["count"]
        else:
            extra_seconds += value["time"] * value["count"]

    # Total minutes, rounded up to the next whole minute.
    return math.ceil(minutes + extra_seconds / 60)


def process_reading_time(app: Sphinx, doctree: nodes.document, docname: str):
    """Compute the reading time and word count for one document.

    Runs on ``doctree-resolved``. Skips excluded documents, walks the
    doctree, estimates reading time and word count, and stores both in
    ``app.env.reading_time[docname]`` for the HTML page context hook.
    """
    # Normalize exclusion patterns so they compare cleanly against docnames.
    normalized_excludes = [normalize_path(x) for x in app.config.reading_time_exclude]

    # Re-attach the source-file extension recorded at source-read time.
    ext = getattr(app.env, "doc_extensions", {}).get(docname, "")
    full_docname = f"{normalize_path(docname)}{ext}"

    if not app.config.reading_time_enable or should_skip_doc(
        full_docname, normalized_excludes
    ):
        return

    # Reset the per-document element counters: element_stats is mutable
    # module-level state, so without this reset the counts of previously
    # processed documents leak into (and inflate) every later estimate.
    element_stats["table"]["count"] = 0
    element_stats["image"]["count"] = 0
    for bucket in element_stats["code-blocks"].values():
        bucket["count"] = 0

    raw_content = get_raw_content(doctree)

    time_text = ""
    text_length = 0
    if raw_content:
        time_text = calculate_readtime(raw_content, app.config.language)
        cleaned_content = raw_content.replace("\n", " ")
        # Pre-compiled token filter: CJK runs, latin/digit words, or
        # version-like numbers (e.g. "v1.2.3").
        char_pattern = re.compile(
            r"^([\u4e00-\u9fa5]+|[a-zA-Z0-9%]+|[a-zA-Z]?\d+(?:\.\d+)*\.?)$"
        )
        text_content = [
            char if "\u4e00" <= char <= "\u9fa5" else item  # CJK counts per character
            for item in list(jieba.cut(cleaned_content))
            if item.strip() and char_pattern.match(item)
            for char in (
                list(item) if "\u4e00" <= item <= "\u9fa5" else [item]
            )  # split CJK tokens into single characters
        ]
        text_length = len(text_content)

    # Store the result on the build environment for the page context hook.
    if not hasattr(app.env, "reading_time"):
        app.env.reading_time = {}
    # The original used hasattr() on this dict (always False for a docname
    # key); setdefault expresses the intended "create entry if missing".
    app.env.reading_time.setdefault(docname, {}).update(
        {
            "time": time_text,
            "length": text_length,
        }
    )


def setup(app: Sphinx) -> Dict:
    """Register the reading-time extension with Sphinx."""

    # noinspection PyShadowingNames, PyUnusedLocal
    def track_file_extensions(app: Sphinx, docname: str, source: list):
        """Remember each document's original source-file extension."""
        if not hasattr(app.env, "doc_extensions"):
            app.env.doc_extensions = {}
        # Derive the extension from the resolved source path.
        ext = os.path.splitext(app.env.doc2path(docname))[1].lower()
        app.env.doc_extensions[docname] = ext

    # noinspection PyUnusedLocal, PyShadowingNames
    def add_time_to_context(app: Sphinx, pagename, templatename, context, doctree):
        """Expose the computed reading-time metadata to the HTML template."""
        stored = getattr(app.env, "reading_time", None) or {}
        context.update({"reading_time": stored.get(pagename, {})})

    # Configuration values (both trigger an HTML rebuild when changed).
    app.add_config_value("reading_time_enable", True, "html")
    # The exclusion list supports wildcards and compiled regexes.
    app.add_config_value("reading_time_exclude", ["404.md"], "html")

    # Record file extensions as sources are read.
    app.connect("source-read", track_file_extensions)
    # Compute reading time once each doctree is resolved.
    app.connect("doctree-resolved", process_reading_time)
    # Inject the result into the HTML template context.
    app.connect("html-page-context", add_time_to_context)

    # Sphinx extension metadata contract.
    return {"version": "1.0", "parallel_read_safe": True, "parallel_write_safe": True}
