"""
    fuzzy_search.py

    MediaWiki API Demos
    Demo of `Search` module: Perform a fuzzy search

    MIT License
"""

import requests

if __name__ == "__main__":
    S = requests.Session()

    URL = "https://vcpedia.cn/api.php"

    # A trailing "~" asks the MediaWiki search backend for a fuzzy
    # (approximate) match on the term.
    SEARCH_TERM = "言和~"

    SEARCH_PARAMS = {
        "action": "query",
        "format": "json",
        "list": "search",
        "srsearch": SEARCH_TERM,
        "srwhat": "text",  # search the article body text
        "srlimit": 5,      # cap the number of returned results
    }

    R = S.get(url=URL, params=SEARCH_PARAMS)
    # Fail fast on HTTP errors instead of handing an error page to .json().
    R.raise_for_status()
    DATA = R.json()

    print(f"搜索 '{SEARCH_TERM}' 的结果:")
    print(f"找到 {DATA['query']['searchinfo']['totalhits']} 个匹配项")
    print("前几个结果:")

    for result in DATA["query"]["search"]:
        print(f"- {result['title']} (ID: {result['pageid']})")
        print(f"  摘要: {result['snippet']}")
        print()

        # Follow-up query: fetch the plain-text extract and canonical URL
        # for this search hit. (The original sent rvprop=content without
        # prop=revisions, which the API silently ignores — removed.)
        page_params = {
            "action": "query",
            "format": "json",
            "prop": "extracts|info",  # extracts: page content; info: page metadata
            "pageids": result["pageid"],
            "inprop": "url",          # include the canonical page URL
            "formatversion": "2",     # modern JSON shape: pages is a list of dicts
        }

        page_resp = S.get(url=URL, params=page_params)
        page_resp.raise_for_status()
        page_data = page_resp.json()

        # With formatversion=2, "pages" is a list of page objects
        # (not a dict keyed by page id as in formatversion=1).
        for page in page_data.get("query", {}).get("pages", []):
            print(f"页面标题: {page.get('title', '未知')}")
            print(f"页面ID: {page.get('pageid', '未知')}")
            print(f"完整URL: {page.get('fullurl', '未知')}")
            print("\n页面内容:")
            print(page.get('extract', '无内容'))
            print()