from typing import List, Tuple, Union
from langchain_core.documents import Document
from bs4 import BeautifulSoup
import requests

# Alternative URL reachable from mainland China (17173 game-news article)
page_url = "https://news.17173.com/content/10052024/023602320.shtml"

def _get_setup_docs_from_url(url: str) -> List[Union[Document, Tuple[Document, Document]]]:
    """Fetch a news page and turn its structure into langchain Documents.

    Headings (h1/h2/h3) and non-empty tables each become a standalone
    ``Document``; every paragraph found under a heading is appended as a
    ``(parent_heading_doc, paragraph_doc)`` tuple so the parent link is
    preserved in the returned list.

    Args:
        url: Page URL to fetch and parse.

    Returns:
        A flat list mixing heading/table Documents and
        (heading, paragraph) Document tuples, in document order.

    Raises:
        requests.HTTPError: if the server returns an error status code.
        requests.RequestException: on network failure or timeout.
    """
    # 1. Fetch the page HTML. A browser-like User-Agent avoids trivial bot
    # blocking; the timeout keeps us from hanging forever on a dead host.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()  # fail loudly instead of parsing an error page
    # Parse the raw bytes so BeautifulSoup detects the charset itself:
    # response.text guesses ISO-8859-1 when the server omits a charset
    # header, which garbles Chinese pages like this one.
    soup = BeautifulSoup(response.content, "html.parser")

    setup_docs: List[Union[Document, Tuple[Document, Document]]] = []

    # 2. Parse headings (assumed to live in <h1>/<h2>/<h3> tags); each
    # non-empty heading becomes a "Title" parent Document.
    for heading in soup.find_all(["h1", "h2", "h3"]):
        heading_text = heading.get_text().strip()
        if not heading_text:
            continue
        parent_id = f"heading_{len(setup_docs)}"
        current_parent = Document(
            page_content=heading_text,
            metadata={
                "category": "Title",
                "element_id": parent_id,
                "source": url,
                "tag": heading.name,  # HTML tag type (h1/h2/h3)
            },
        )
        setup_docs.append(current_parent)

        # 3. Collect <p> siblings until the next heading; each paragraph
        # is emitted as a (parent, child) tuple tied to this heading.
        next_sibling = heading.find_next_sibling()
        while next_sibling and next_sibling.name not in ["h1", "h2", "h3"]:
            if next_sibling.name == "p":
                paragraph = next_sibling.get_text().strip()
                if paragraph:
                    setup_docs.append((
                        current_parent,
                        Document(
                            page_content=paragraph,
                            metadata={
                                "category": "Text",
                                "parent_id": parent_id,
                                "source": url,
                            },
                        ),
                    ))
            next_sibling = next_sibling.find_next_sibling()

    # 4. Parse <table> elements; each non-empty table becomes a
    # standalone "Table" Document (no parent link).
    for table in soup.find_all("table"):
        table_text = table.get_text().strip()
        if table_text:
            setup_docs.append(Document(
                page_content=table_text,
                metadata={
                    "category": "Table",
                    "element_id": f"table_{len(setup_docs)}",
                    "source": url,
                },
            ))

    return setup_docs

if __name__ == "__main__":
    docs = _get_setup_docs_from_url(page_url)

    divider = "-" * 80
    for entry in docs:
        if isinstance(entry, tuple):
            # Heading/paragraph pair: print the parent heading first,
            # then the child paragraph, each truncated to 50 chars.
            parent, child = entry
            tag = parent.metadata.get("tag", "")
            print(f'[父元素] {parent.metadata["category"]} ({tag}): {parent.page_content[:50]}...')
            print(f'[子元素] {child.metadata["category"]}: {child.page_content[:50]}...')
        else:
            # Standalone Document (heading without children, or a table).
            print(f'[{entry.metadata["category"]}] {entry.page_content[:50]}...')
        print(divider)