import hashlib
import re
from langchain.text_splitter import RecursiveCharacterTextSplitter


def generate_parent_id(filename, title):
    """Derive a stable, unique parent-document ID from filename and title.

    Both parts are stripped of surrounding whitespace and joined with a
    hyphen before hashing, so the same (filename, title) pair always maps
    to the same ID while different files/titles avoid collisions.
    """
    key = "-".join((filename.strip(), title.strip()))
    return hashlib.md5(key.encode("utf-8")).hexdigest()


def build_heading_tree(md_text):
    """Build a tree of Markdown headings using a regex scan.

    Every node is a dict with keys ``level``, ``title``, ``content`` and
    ``children``. A synthetic root node (level 0) collects any text that
    appears before the first heading.
    """
    heading_re = re.compile(r"^(#{1,6})\s+(.*)")
    root = {"level": 0, "title": "root", "content": "", "children": []}
    open_nodes = [root]

    for raw_line in md_text.splitlines():
        match = heading_re.match(raw_line)
        if match is None:
            # Plain text accumulates on whichever node is currently open.
            current = open_nodes[-1]
            if current["content"]:
                current["content"] += "\n" + raw_line
            else:
                current["content"] = raw_line
            continue

        depth = len(match.group(1))
        new_node = {
            "level": depth,
            "title": match.group(2).strip(),
            "content": "",
            "children": [],
        }
        # Unwind to the nearest strictly shallower heading; the level-0 root
        # can never be popped, so the stack is never empty.
        while open_nodes[-1]["level"] >= depth:
            open_nodes.pop()
        open_nodes[-1]["children"].append(new_node)
        open_nodes.append(new_node)

    return root


# Module-level registry of parent-document IDs that have already been emitted.
# NOTE(review): this set is shared across all calls in the process, so state
# persists between separate parse invocations.
seen_parent_ids = set()


def flatten_heading_tree(
    node, parent_docs, child_docs, filename, max_parent_length=1000
):
    """Recursively walk the heading tree, appending parent and child docs.

    For every non-root node that has both a title and non-empty content:
    - a parent doc ``{"id", "title", "content"}`` is appended to
      ``parent_docs`` (deduplicated via the module-level ``seen_parent_ids``),
    - the content is split into chunks and each chunk is stored in
      ``child_docs`` as ``{"parent_id", "text"}`` with the title prepended,
      so vector-store retrieval can match on both title and body text.

    Note: the original ``if len(content) > max_parent_length`` branch and its
    ``else`` branch were byte-for-byte identical, so the check was dead code;
    the two paths are collapsed here with behavior unchanged.
    NOTE(review): ``max_parent_length`` consequently has no effect on the
    output — confirm whether short content was meant to skip splitting.
    """
    # Skip the synthetic root node; it only anchors the tree.
    if node["level"] != 0:
        title = node["title"]
        content = node["content"].strip()
        if title and content:
            doc_id = generate_parent_id(filename, title)
            # Ensure the same parent document is not added twice.
            if doc_id not in seen_parent_ids:
                parent_docs.append(
                    {"id": doc_id, "title": title, "content": content}
                )
                seen_parent_ids.add(doc_id)
            # Build the splitter once per node instead of once per branch.
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=500,
                chunk_overlap=100,
                separators=["\n\n", "\n", "。", ".", "，", ","],
            )
            for chunk in splitter.split_text(content):
                # Prefix the heading so each child chunk carries its context.
                child_docs.append(
                    {"parent_id": doc_id, "text": f"{title}\n{chunk}"}
                )

    # Recurse into sub-headings.
    for child in node["children"]:
        flatten_heading_tree(
            child, parent_docs, child_docs, filename, max_parent_length
        )


def parse_markdown(md_text, filename, max_parent_length=1000):
    """Parse a Markdown document into parent and child documents.

    Builds a heading tree from *md_text* and flattens it, producing:
    - ``parent_docs``: one entry per heading section, with ``id``, ``title``
      and ``content``.
    - ``child_docs``: finer-grained chunks of each parent's content, each
      carrying ``parent_id`` and a ``text`` field with the title prepended,
      so vector retrieval can exploit both title and body.

    Returns:
        tuple[list, list]: ``(parent_docs, child_docs)``.
    """
    # Reset the module-level dedup registry so each parse is idempotent.
    # Previously, re-parsing the same file returned an empty parent_docs
    # list because its IDs were already recorded from the first call.
    # Intra-parse dedup of duplicate titles is unaffected, and IDs embed
    # the filename, so cross-file behavior is unchanged.
    seen_parent_ids.clear()

    parent_docs = []
    child_docs = []

    # Build the heading tree, then flatten it into the two document lists.
    tree = build_heading_tree(md_text)
    flatten_heading_tree(tree, parent_docs, child_docs, filename, max_parent_length)

    return parent_docs, child_docs
