from ast import parse
from ctypes import alignment
import requests
from bs4 import BeautifulSoup
import xlwt
import re

# Fetch the skill index table from the Langrisser wiki.
response = requests.get("https://wiki.biligame.com/langrisser/技能数据表")
soup = BeautifulSoup(response.text, 'html.parser')
table = soup.select(".mw-parser-output")[0].select("#CardSelectTr")[0]
tr_nodes = table.select("tbody")[0].select(".itemhover")

# str.translate table that deletes spaces, newlines and non-breaking spaces.
move = dict.fromkeys((ord(c) for c in u" \n\xa0"))

workbook = xlwt.Workbook(encoding="utf-8")
worksheet = workbook.add_sheet("Skill")

# Default cell style: vertically centred.
# FIX: the Alignment object was previously created but never attached to
# normal_style, so the style had no effect.
normal_style = xlwt.XFStyle()
vert_alignment = xlwt.Alignment()
vert_alignment.vert = 1  # vertical centre
normal_style.alignment = vert_alignment

# Header row and column widths (xlwt width unit is 1/256 of a character).
# FIX: column 7 is filled with the owning heroes in the main loop but had
# no header or width configured.
headers = [
    ("名称", 4200),
    ("COST", 2600),
    ("类别", 3600),
    ("冷却", 3600),
    ("射程", 3600),
    ("范围", 3600),
    ("技能", 25600),
    ("英雄", 12800),
]
for col, (title, width) in enumerate(headers):
    worksheet.write(0, col, title)
    worksheet.col(col).width = width

# Fonts used inside the rich-text skill description cell.
red_font = xlwt.Font()
red_font.colour_index = 0x0A
blue_font = xlwt.Font()
blue_font.colour_index = 0x30

# Style for the skill description cell: word wrap enabled.
skill_style = xlwt.XFStyle()
wrap_alignment = xlwt.Alignment()
wrap_alignment.wrap = 1
skill_style.alignment = wrap_alignment

# First data row (row 0 is the header). NOTE: the unused `regex` variable
# that used to live here was dead code and has been removed.
count = 1

def parse_skill_node(node, rich_text_list):
    """Recursively flatten a BeautifulSoup node into xlwt rich-text segments.

    Mutates *rich_text_list* in place, appending either plain strings or
    ``(text, font)`` tuples understood by ``worksheet.write_rich_text``:

    - ``<font>`` content is rendered with the module-level ``red_font``
    - ``<a>`` link labels are rendered with ``blue_font``
    - ``<br>`` tags contribute nothing
    - bare text nodes (``name is None``) are appended with
      spaces/newlines/nbsp stripped via the module-level ``move`` table
    - any other element is descended into recursively
    """
    if not hasattr(node, "name"):
        return
    if node.name == "font":
        rich_text_list.append((str(node.text), red_font))
    elif node.name == "a":
        # Pull the link label out of the serialized anchor tag.
        match = re.match(r"<a href=\".*?\".*?>(.*?)</a>", str(node), re.MULTILINE)
        # FIX: guard against an unmatched pattern — previously this raised
        # AttributeError on match.group(1) when the anchor had another shape.
        if match is not None:
            rich_text_list.append((str(match.group(1)), blue_font))
    elif node.name == "br":
        pass  # line break: no text to emit
    elif node.name is None:  # FIX: `is None`, not `== None`
        # NavigableString (plain text) — strip whitespace characters.
        rich_text_list.append(str(node).translate(move))
    else:
        for child in node.contents:
            parse_skill_node(child, rich_text_list)

# Scrape each skill's detail page and write one worksheet row per skill.
# FIX: use enumerate instead of a manually incremented counter, and do not
# clobber the module-level `soup` (renamed local to `detail`).
for count, item in enumerate(tr_nodes, start=1):
    # The skill name doubles as the wiki page slug for the detail page.
    name = item.contents[3].select("tr")[0].th.text.translate(move)
    response = requests.get(f"https://wiki.biligame.com/langrisser/{name}")
    detail = BeautifulSoup(response.text, "html.parser")
    skill_rows = detail.select(".wikitable")[0].select("tr")

    # Each cell's text carries a label prefix (e.g. "COST：") that is
    # sliced off; the offsets match the label lengths on the wiki page.
    skill_name = skill_rows[1].contents[3].text.translate(move)
    worksheet.write(count, 0, skill_name, normal_style)
    skill_cost = skill_rows[1].contents[5].text.translate(move)
    worksheet.write(count, 1, skill_cost[5:], normal_style)
    skill_type = skill_rows[2].contents[1].text.translate(move)
    worksheet.write(count, 2, skill_type[3:], normal_style)
    skill_cd = skill_rows[2].contents[3].text.translate(move)
    worksheet.write(count, 3, skill_cd[3:], normal_style)
    skill_distance = skill_rows[3].contents[1].text.translate(move)
    worksheet.write(count, 4, skill_distance[3:], normal_style)
    skill_area = skill_rows[3].contents[3].text.translate(move)
    worksheet.write(count, 5, skill_area[3:], normal_style)

    # The description cell is rich text (red/blue highlighted fragments).
    skill = []
    parse_skill_node(skill_rows[4].contents[1], skill)
    worksheet.write_rich_text(count, 6, skill, skill_style)

    # Heroes that own this skill; the trailing node is not a hero link,
    # hence the [:-1] slice.
    skill_owner = detail.select(".wikitable")[1].select("tr")[1].contents[1].contents
    owner = []
    for hero in skill_owner[:-1]:
        match = re.search(r"<a href=\"/langrisser/.*?\" title=\"(.*?)\">", str(hero), re.MULTILINE)
        # FIX: skip nodes the pattern cannot parse instead of crashing
        # with AttributeError on match.group(1).
        if match is not None:
            owner.append(match.group(1))
    worksheet.write(count, 7, ",".join(owner), normal_style)

    print(f"NO.{count:>3}: [{name}]")

workbook.save("LangrisserSkill.xls")
print("Complete!")