import re
from bs4 import BeautifulSoup
import json
from pathlib import Path
from collections import deque

# Set root to the directory that contains your HTML files.
root = Path(".")

# 扫描html文件
# Scan one HTML file and build its search-index entry.
def scanfile(path: Path, content) -> dict:
    """Parse *content* (raw HTML bytes) and return an index record for *path*.

    The returned dict maps:
      - "title": text of the first <h1>, falling back to the file stem
      - "path":  path relative to ``root``, always with forward slashes
      - "text":  text of p/h1/h3/h4/li/span tags, reduced to CJK
        ideographs plus ASCII letters/digits, joined with "|"

    NOTE(review): an earlier comment mentioned h2, but the code never
    searched for it — the tag list below is what is actually indexed.
    """
    soup = BeautifulSoup(content, 'html.parser')

    # Keep only CJK ideographs (U+4E00-U+9FA5), digits and ASCII letters;
    # punctuation and whitespace are stripped. Compiled once per file
    # instead of once per tag.
    strip_symbols = re.compile(
        u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a])")
    textlist = "|".join(
        strip_symbols.sub("", tag.get_text())
        for tag in soup.find_all(name=['p', 'h1', 'h3', 'h4', 'li', "span"]))

    # Prefer the first <h1> as the title; fall back to the file name.
    title = soup.find(name="h1")
    title = title.get_text() if title else path.stem

    entry = {
        "title": title,
        # Forward slashes so generated links work in a browser
        # regardless of the OS the index was built on
        # (as_posix() is equivalent to str().replace("\\", "/") here).
        "path": path.relative_to(root).as_posix(),
        "text": textlist
    }

    print(entry["path"])  # progress output
    return entry


if __name__ == "__main__":
    j = []
    # Iterative depth-first walk over everything under root, using an
    # explicit stack of pending paths.
    target = deque([root])  # type: deque[Path]

    while target:
        file = target.pop()
        # Skip the search page itself, the index page and the frame file.
        # NOTE(review): the comparison uses str(file), so it only matches
        # when the stored string carries the same directory prefix and
        # separator the OS produces — confirm on non-Windows hosts.
        if str(file) in ["search.html", "index.html", '鑫程序化交易系统帮助\___left.htm']:
            continue

        if file.is_dir():
            target.extend(file.iterdir())
        elif file.is_file() and file.suffix in [".html", ".htm"]:
            try:
                j.append(scanfile(file, file.read_bytes()))
            except Exception as exc:  # was a bare except: report and keep going
                print('错误：', file, exc)

    # Concatenate the scan result and search.js into searcher.js; the
    # HTML pages are expected to include searcher.js.
    # json.dumps(json.dumps(j)) emits a correctly escaped JavaScript
    # string literal — the old single-quote embedding produced broken JS
    # whenever the JSON contained a quote or backslash.
    with open("searcher.js", "w", encoding='utf-8') as output, \
            open("search.js", "r", encoding='utf-8') as template:
        output.write("let SearchResult = " + json.dumps(json.dumps(j)) + ";\n")
        output.write(template.read())
