from bs4 import BeautifulSoup
import shutil
import re
import os
import csv

# try:
#     shutil.rmtree("csv")
# except:
#     pass
# os.mkdir("csv")

# def open_csv(name):
#     return csv.writer(open("csv/{}.csv".format(name), "a", newline=""), doublequote=False, escapechar="\\")

# # 定义关系
# # intro-c1
# include_rel = open_csv("include_rel")
# include_rel.writerow([":START_ID", ":END_ID"])
# # c2-c1
# is_a_rel = open_csv("is_a_rel")
# is_a_rel.writerow([":START_ID", ":END_ID"])
# # c2-c3
# has_rel = open_csv("has_rel")
# has_rel.writerow([":START_ID", ":END_ID"])

# # 定义实体
# intro_ents = open_csv("intro_ents")
# intro_ents.writerow(["introId:ID", "ent_name", "ent_des"])
# # <div>
# c1_ents = open_csv("c1_ents")
# c1_ents.writerow(["c1Id:ID", "ent_name"])
# # <dl>
# c2_ents = open_csv("c2_ents")
# c2_ents.writerow(["c2Id:ID", "ent_name", "description"])

# c3_ents = open_csv("c3_ents")
# c3_ents.writerow(["c3Id:ID", "ent_name", "description"])

# intro_count = 0
# c1_count = 0
# c2_count = 0
# c3_count = 0

# with open("D:/Chrome/Dl/python362/library/exceptions.html", "r", encoding="utf-8", errors="ignore") as html_doc:
#     soup = BeautifulSoup(html_doc, "lxml")

# # 获取、存放intro实体名和描述
# sections = soup.find_all("div", attrs={"class":"section"})
# intro_section = sections[0]
# intro_ent = intro_section.get("id")
# intro_des = intro_section.find("p").get_text().replace("\n"," ")
# # print(intro_ent,intro_des)
# introId = "intro_"+str(intro_count)
# intro_ents.writerow([introId, intro_ent, intro_des.replace("\n"," ")])
# intro_count += 1

# # class1 实体
# class1_ents = intro_section.find_all("div", attrs={"class":"section"})
# for class1_ent in class1_ents:
#     # print(section.get("id"))
#     c1Id = "c1_"+str(c1_count)
#     c1_ents.writerow([c1Id, class1_ent.get("id")])
#     include_rel.writerow([introId, c1Id])
#     c1_count += 1
#     # print(section)
#     # os.system("pause")
#     class2_ents = class1_ent.find_all("dl")
#     # class2 实体
#     # count作为一个计数器，跳过count个作为class3的dl标签
#     count = 0
#     for class2_ent in class2_ents:
#         if count > 0:
#             count -= 1
#             continue
#         class2_ent_name = class2_ent.find("dt").get("id")
#         if class2_ent.find("dd").find("p"):
#             class2_ent_des = class2_ent.find("dd").find("p").get_text()
#         else:
#             class2_ent_des = ""
#         # class2_ent_des = class2_ent.find("dd").find("p").get_text()
#         c2Id = "c2_"+str(c2_count)
#         c2_ents.writerow([c2Id, class2_ent_name, class2_ent_des.replace("\n"," ")])
#         is_a_rel.writerow([c2Id, c1Id])
#         c2_count += 1
#         # print(class2_ent_name, class2_ent_des)
#         # os.system("pause")

#         # class3实体
#         class3_ents = class2_ent.find_all("dl")
#         count = len(class3_ents)
#         if count > 0:
#             for class3_ent in class3_ents:
#                 class3_ent_name = class3_ent.find("dt").get("id")
#                 class3_ent_des = class3_ent.find("dd").get_text()
#                 c3Id = "c3_"+str(c3_count)
#                 c3_ents.writerow([c3Id, class3_ent_name, class3_ent_des.replace("\n"," ")])
#                 has_rel.writerow([c2Id, c3Id])
#                 c3_count += 1
#                 # print(class3_ent_name, class3_ent_des)
#                 # os.system("pause")
# from py2neo import Graph,Node,Relationship
# # 连接neo4j数据库，输入地址、用户名、密码
# graph = Graph('http://localhost:7474',username='neo4j',password='rvli')

# res = graph.run("MATCH (n)  where n.introId = 'intro_90' return n").data()  # table

# print(len(res))

def get_contents_of_tag(tag):
    """Recursively expand a BeautifulSoup tag into nested lists of children.

    A node that exposes a ``.contents`` attribute (a Tag) is expanded into a
    list of its recursively-expanded children; any other node (e.g. a plain
    NavigableString leaf) is returned unchanged.  Children that are bare
    newline strings — formatting whitespace from the parsed document — are
    skipped.

    :param tag: a BeautifulSoup Tag (or any object with a ``.contents`` list),
        or a leaf value without one.
    :return: a (possibly nested) list of children, or ``tag`` itself if it is
        a leaf.
    """
    try:
        contents = tag.contents
    except AttributeError:
        # Leaf node — nothing to expand.
        return tag
    # Filter out newline-only children instead of calling contents.remove()
    # in a loop: this avoids mutating the parsed tree in place.
    return [get_contents_of_tag(child) for child in contents if child != "\n"]

# html_path = "D:/Chrome/Dl/python362/library/math.html"

# with open(html_path, "r", encoding="utf-8", errors="ignore") as html_doc:
#     soup = BeautifulSoup(html_doc, "lxml")


# Sample document (the "three sisters" example from the BeautifulSoup docs),
# used below to experiment with tag .contents traversal.
html = """
<html>
 <head>
  <title>The Dormouse's story</title>
 </head> 
 <body> 
  <p class="title"><b>The Dormouse's story</b></p> 
  <p class="story">Once upon a time there were three little sisters; and their names were 
    <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>, 
    <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and 
    <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>; and they lived at the bottom of a well.
  </p> 
  <p class="story">...</p>
 </body>
</html>
"""

soup = BeautifulSoup(html, "lxml")

# NOTE(review): the original named this variable `body`, but it is assigned
# soup.head — renamed to match what it actually holds. Confirm nothing else
# relies on the old module-level name.
head = soup.head

# Print each direct child of <head> (this includes whitespace text nodes
# such as '\n' that separate the tags in the source markup).
for index, child in enumerate(head.contents):
    print("The {} is ".format(index), child)

# print(body_tag.contents[0:4])
# print(body_tag.contents[1].contents)
# body_contents = get_contents_of_tag(body.contents[1])
# print(body_contents)
# print(len(body.contents))
# print(len(body.contents[1]))
# for i in range(len(body.contents[1])):
#     print(body.contents[1][i])
# for child in body.contents[1].descendants:
#     print(child)
    


