"""
通过API文档的结构（树形结构），提取实体和关系
"""

from bs4 import BeautifulSoup
import shutil
import re
import os
import csv
import sys
from tqdm import tqdm


# Recreate the output directory from scratch so stale CSVs from a previous
# run never get appended to (open_csv opens in "a" mode).
# The original `try: rmtree; mkdir except: pass` skipped mkdir whenever
# rmtree failed (e.g. first run, "csv" absent), so the directory was never
# created and every open_csv call crashed.
shutil.rmtree("csv", ignore_errors=True)
os.makedirs("csv", exist_ok=True)


def open_csv(name):
    """Return a ``csv.writer`` appending to ``csv/<name>.csv``.

    Files are written in gb18030 (the downstream import expects it) with
    backslash escaping instead of doubled quotes.

    NOTE(review): the underlying file handle is deliberately never closed;
    it lives for the whole process and is flushed at interpreter exit.
    """
    # Defensive: guarantee the output directory exists even if the
    # top-of-file setup failed, so open() cannot raise FileNotFoundError.
    os.makedirs("csv", exist_ok=True)
    return csv.writer(
        open("csv/{}.csv".format(name), "a", encoding="gb18030", newline=""),
        doublequote=False,
        escapechar="\\",
    )

# --- Relation writers -----------------------------------------------------
include_rel = open_csv("include_rel")   # intro -> c1
is_a_rel = open_csv("is_a_rel")         # c2 -> c1
has_rel = open_csv("has_rel")           # c2 -> c3
# All relation CSVs share the same Neo4j import header.
for _rel_writer in (include_rel, is_a_rel, has_rel):
    _rel_writer.writerow([":START_ID", ":END_ID"])

# --- Entity writers -------------------------------------------------------
intro_ents = open_csv("intro_ents")
intro_ents.writerow(["introId:ID", "ent_name", "ent_des"])

c1_ents = open_csv("c1_ents")           # from <div> sections
c1_ents.writerow(["c1Id:ID", "ent_name"])

c2_ents = open_csv("c2_ents")           # from <dl> definitions
c2_ents.writerow(["c2Id:ID", "ent_name", "description"])

c3_ents = open_csv("c3_ents")           # from nested <dl> definitions
c3_ents.writerow(["c3Id:ID", "ent_name", "description"])

# Monotonic counters used to mint unique node IDs across all input files.
intro_count = 0
c1_count = 0
c2_count = 0
c3_count = 0

def extract_content_from_html(html_path):
    """Parse one API-doc HTML file and append its entities and relations
    to the module-level CSV writers.

    Structure assumed by the lookups below (TODO confirm against the docs):
      - the first ``<div class="section">`` is the "intro" node;
      - ``<div class="section">`` nested inside it are class-1 entities;
      - ``<dl>`` tags under a class-1 div are class-2 entities;
      - ``<dl>`` tags nested inside a class-2 ``<dl>`` are class-3 entities.

    Side effects: writes rows through the global csv writers and
    increments the global ID counters.

    NOTE(review): ``sections[0]`` and the chained ``find(...)`` calls
    raise if the expected tags are missing — assumed well-formed input.
    """

    global intro_count, c1_count, c2_count, c3_count

    with open(html_path, "r", encoding="utf-8", errors="ignore") as html_doc:
        soup = BeautifulSoup(html_doc, "lxml")

    # Intro entity: id of the first top-level section plus its first <p>.
    sections = soup.find_all("div", attrs={"class":"section"})
    intro_section = sections[0]
    intro_ent = intro_section.get("id")
    intro_des = intro_section.find("p").get_text().replace("\n"," ")
    # print(intro_ent,intro_des)
    introId = "intro_"+str(intro_count)
    intro_ents.writerow([introId, intro_ent, intro_des.replace("\n"," ")])
    intro_count += 1

    # Class-1 entities: sections nested inside the intro section.
    class1_ents = intro_section.find_all("div", attrs={"class":"section"})
    for class1_ent in class1_ents:
        # print(section.get("id"))
        c1Id = "c1_"+str(c1_count)
        c1_ents.writerow([c1Id, class1_ent.get("id")])
        include_rel.writerow([introId, c1Id])
        c1_count += 1
        # print(section)
        # os.system("pause")
        class2_ents = class1_ent.find_all("dl")
        # Class-2 entities.
        # `count` skips the next `count` <dl> tags in `class2_ents`: they
        # are the class-3 children of the class-2 entity just processed
        # (find_all is recursive and yields tags in document order, so
        # nested <dl>s appear immediately after their parent).
        count = 0
        for class2_ent in class2_ents:
            if count > 0:
                count -= 1
                continue
            class2_ent_name = class2_ent.find("dt").get("id")
            if class2_ent.find("dd").find("p"):
                class2_ent_des = class2_ent.find("dd").find("p").get_text()
            else:
                # No <p> in the <dd>: record an empty description.
                class2_ent_des = ""
            # class2_ent_des = class2_ent.find("dd").find("p").get_text()
            c2Id = "c2_"+str(c2_count)
            c2_ents.writerow([c2Id, class2_ent_name, class2_ent_des.replace("\n"," ")])
            is_a_rel.writerow([c2Id, c1Id])
            c2_count += 1
            # print(class2_ent_name, class2_ent_des)
            # os.system("pause")

            # Class-3 entities: <dl> nested inside this class-2 <dl>.
            # Their number becomes the skip count for the outer loop above.
            class3_ents = class2_ent.find_all("dl")
            count = len(class3_ents)
            if count > 0:
                for class3_ent in class3_ents:
                    class3_ent_name = class3_ent.find("dt").get("id")
                    class3_ent_des = class3_ent.find("dd").get_text()
                    c3Id = "c3_"+str(c3_count)
                    c3_ents.writerow([c3Id, class3_ent_name, class3_ent_des.replace("\n"," ")])
                    has_rel.writerow([c2Id, c3Id])
                    c3_count += 1
                    # print(class3_ent_name, class3_ent_des)
                    # os.system("pause")

# Directory of HTML files to process, given as the first CLI argument.
dir_path = sys.argv[1]

html_names = os.listdir(dir_path)
for html_name in tqdm(html_names):
    html_path = os.path.join(dir_path, html_name)
    # os.listdir also yields sub-directories and other non-file entries;
    # skip them so open() inside the extractor does not crash.
    if not os.path.isfile(html_path):
        continue
    extract_content_from_html(html_path)