import sys
sys.path.append("../")
import codecs
import os

from lxml import etree
from bs4 import BeautifulSoup
from common import common

# Bookmark folder titles (exactly as they appear in the exported HTML's
# <h3> elements) that recursiveParse skips entirely — no directory is
# created and no links under them are collected.
# NOTE: entries are runtime data and must match the export byte-for-byte.
ignored_folder_list = [
    "无需下载链接",
    "书签收藏",
    # "需手工导入",
    "已归档书签",
]


def recursiveParse(parentElem, level, parentPath):
    """Recursively mirror one bookmark <dt> element onto the filesystem.

    A <dt> containing an <h3> is a folder: a matching directory is created
    under parentPath (unless the title is in ignored_folder_list) and its
    child <dt> entries are processed recursively; the collected child links
    are flushed to a url_list.txt inside that directory. A <dt> without a
    nested <dl> is a leaf bookmark.

    Args:
        parentElem: a BeautifulSoup <dt> element from the exported HTML.
        level: recursion depth (1 at the roots selected by make_list).
        parentPath: directory on disk corresponding to parentElem's parent.

    Returns:
        (href, text, level) for a leaf bookmark, or (None, None, None) for
        folders, ignored folders, and malformed entries.
    """
    elem_h3 = parentElem.find("h3", recursive=False)
    # Folders carry an <h3> title; plain leaf bookmarks do not. Compute the
    # directory for children up front so a title-less <dt> with nested <dl>
    # no longer crashes on elem_h3.text (it falls through to parentPath).
    folder_path = parentPath
    if elem_h3:
        print(elem_h3.text)
        print(f"build folder {elem_h3.text}")
        if elem_h3.text in ignored_folder_list:
            return None, None, None
        folder_path = os.path.join(parentPath, elem_h3.text)
        # makedirs(exist_ok=True) also tolerates a missing intermediate dir.
        os.makedirs(folder_path, exist_ok=True)
    elem_list_dl = parentElem.find_all("dl", recursive=False)
    if not elem_list_dl:  # leaf: a single bookmark link
        if parentElem.a is None:  # malformed <dt> with no anchor — skip it
            return None, None, None
        return parentElem.a.attrs["href"], parentElem.a.text, level
    for elem_dl in elem_list_dl:  # JM: in practice there is exactly one <dl>
        elem_list_dt = elem_dl.find_all("dt", recursive=False)
        link_list = []
        for elem_dt in elem_list_dt:
            link, text, cur_level = recursiveParse(elem_dt, level + 1, folder_path)
            if link is not None:
                link_list.append(link)
        common.write_url_list(os.path.join(folder_path, "url_list.txt"), link_list)
        print(f"flushing to folder {folder_path}, links {len(link_list)}")
        return None, None, None


def make_list(vault_root_folder_path, bookmark_file_path):
    """Build the WebArchive Vault directory tree from a bookmark export.

    Ensures the vault root directory exists, parses the exported bookmark
    HTML, and recursively processes each top-level <dt> entry.

    JM: the HTML exported by cubox must first be opened in Chrome and
    re-saved via right-click "Save as" to normalize it before parsing.
    """
    if not os.path.isdir(vault_root_folder_path):
        os.mkdir(vault_root_folder_path)
    with codecs.open(bookmark_file_path, mode='r', encoding='utf-8') as bookmark_file:
        html_text = bookmark_file.read()
    dom = BeautifulSoup(html_text, 'html.parser')
    for root_elem in dom.select("body > dl > dt"):
        recursiveParse(root_elem, 1, vault_root_folder_path)


if __name__ == "__main__":
    # This script recursively generates the WebArchive Vault directory
    # structure from a bookmark file exported by cubox.
    #
    # Script parameters:
    # * archive_vault_root_folder_path: root directory of the WebArchive
    #   Vault where the directory structure is generated.
    # * cubox_fav_file_path: path of the exported cubox bookmark file.
    #
    # Generated output:
    # * the WebArchive Vault directory structure
    # * a url_list.txt file inside each sub-directory
    archive_vault_root_folder_path = "E:/WebArchivesVault"
    cubox_fav_file_path = "E:/Bookmarks.html"
    make_list(archive_vault_root_folder_path, cubox_fav_file_path)