import json
import re
import traceback
from bs4 import BeautifulSoup, NavigableString
import os
import shutil

def get_text_safe(element):
    """Return the stripped text of a bs4 element, or "" when it is falsy."""
    return element.get_text(strip=True) if element else ""

def get_rarity_from_tag(tag):
    """Extract the rarity digit from a descendant div's ``rar_bg_N`` CSS class.

    Returns the integer N, or 0 when the tag is falsy, no matching div
    exists, or none of the div's classes carries the pattern.
    """
    if not tag:
        return 0
    pattern = re.compile(r'rar_bg_(\d)')
    div = tag.find('div', class_=pattern)
    if not div:
        return 0
    for css_class in div.get('class', []):
        found = pattern.search(css_class)
        if found:
            return int(found.group(1))
    return 0

def parse_weapon_html(html_file_path: str) -> dict:
    """Parse one weapon detail HTML page into a JSON-serializable dict.

    Extracts the name / affix title / rarity / description from the main
    table, the numeric weapon id from the gacha icon filename, ascension
    materials, the per-level stat table, and the refinement (affix) table.

    Args:
        html_file_path: Path to the HTML file to parse.

    Returns:
        A dict with keys ``id``, ``name``, ``affixTitle``, ``star``,
        ``desc``, ``attr``, ``materials``, ``affixData``. Sections that
        are absent from the page keep their default values.
    """
    with open(html_file_path, 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f, 'lxml')

    data = {
        "id": None,
        "name": "",
        "affixTitle": "",
        "star": 0,
        "desc": "",
        "attr": {"atk": {}, "bonusKey": "", "bonusData": {}},
        "materials": {"weapon": "", "monster": "", "normal": ""},
        "affixData": {"text": "", "datas": {}},
    }

    # BUGFIX: `info` must be bound even when the main table is missing,
    # otherwise the materials lookup further down raises NameError.
    info = {}
    if main_table := soup.find('table', class_='main_table'):
        for row in main_table.find('tbody').find_all('tr'):
            cells = row.find_all('td')
            key_cell, value_cell = None, None
            # Rows whose first cell spans several rows carry the key in the
            # second cell; plain two-cell rows carry it in the first.
            if len(cells) == 3 and cells[0].has_attr('rowspan'):
                key_cell, value_cell = cells[1], cells[2]
            elif len(cells) == 2:
                key_cell, value_cell = cells[0], cells[1]
            if key_cell and value_cell and (key := key_cell.get_text(strip=True)):
                info[key] = value_cell

        data['name'] = get_text_safe(info.get('Name'))
        data['affixTitle'] = get_text_safe(info.get('Weapon Affix'))
        data['desc'] = get_text_safe(info.get('Description'))
        # Rarity equals the number of star images rendered in the cell.
        if rarity_cell := info.get('Rarity'):
            data['star'] = len(rarity_cell.find_all('img', alt='Raritystr'))
        substat_map = {
            "Critical Damage %": "cdmg",
            "Critical Rate %": "cpct",
            "ATK %": "atkPct",
            "HP %": "hpPct",
            "DEF %": "defPct",
            "Energy Recharge %": "recharge",
            "Elemental Mastery": "mastery",
            "Physical DMG Bonus %": "phy",
        }
        # Unknown substat labels pass through unchanged rather than being lost.
        if substat_type_str := get_text_safe(info.get('Substat Type')):
            data['attr']['bonusKey'] = substat_map.get(substat_type_str, substat_type_str)

    # The numeric id is embedded in the icon filename: i_n<id>_gacha_icon_w145.webp
    if id_img := soup.find('img', {'src': re.compile(r'i_n\d+_gacha_icon_w145\.webp')}):
        if match := re.search(r'i_n(\d+)', id_img.get('src', '')):
            data['id'] = int(match.group(1))

    if mats_cell := info.get('Weapon Ascension Materials'):
        all_mat_links = mats_cell.find_all('a', href=re.compile(r'/i_n\d+/'))

        # i_n114* items are weapon-domain materials; keep only the rarest one.
        weapon_mat_links = [a for a in all_mat_links if 'i_n114' in a['href']]
        if highest_weapon_mat := max(weapon_mat_links, key=get_rarity_from_tag, default=None):
            if img := highest_weapon_mat.find('img'):
                data['materials']['weapon'] = img.get('alt', '')

        # i_n112* items are enemy drops; group them by the first five id
        # digits so each drop family stays together.
        enemy_mat_links = [a for a in all_mat_links if 'i_n112' in a['href']]
        mat_groups = {}
        for mat in enemy_mat_links:
            if match := re.search(r'i_n(\d+)', mat['href']):
                mat_groups.setdefault(match.group(1)[:5], []).append(mat)

        # A family that contains a rarity-4+ item is the elite drop line;
        # every other family counts as the common drop line.
        elite_group, common_group = [], []
        for links in mat_groups.values():
            if any(get_rarity_from_tag(a) >= 4 for a in links):
                elite_group.extend(links)
            else:
                common_group.extend(links)

        if highest_elite := max(elite_group, key=get_rarity_from_tag, default=None):
            if img := highest_elite.find('img'):
                data['materials']['monster'] = img.get('alt', '')
        if highest_common := max(common_group, key=get_rarity_from_tag, default=None):
            if img := highest_common.find('img'):
                data['materials']['normal'] = img.get('alt', '')

    # Per-level base ATK and substat values, keyed by the level label text.
    if stats_table := soup.find('table', class_='stat_table'):
        if tbody := stats_table.find('tbody'):
            for row in tbody.find_all('tr'):
                if len(cells := row.find_all('td')) < 3:
                    continue
                level = get_text_safe(cells[0])
                atk_str = get_text_safe(cells[1])
                bonus_str = get_text_safe(cells[2]).replace('%', '')
                try:
                    if level:
                        data['attr']['atk'][level] = float(atk_str)
                        data['attr']['bonusData'][level] = float(bonus_str)
                except (ValueError, TypeError):
                    # Skip rows whose cells are not numeric (headers etc.).
                    continue

    # Refinement table: turn the R1 description into a template with $[i]
    # placeholders and collect each placeholder's values across all ranks.
    if affix_table := soup.find('table', class_='affix_table'):
        if tbody := affix_table.find('tbody'):
            if rows := tbody.find_all('tr'):
                all_refine_values = []
                for row in rows:
                    values_for_row = []
                    desc_cell = row.find_all('td')[1]
                    for tag in desc_cell.find_all('color'):
                        # Only the first non-blank text node of each <color>
                        # tag holds the highlighted percentage.
                        first_text = next(
                            (s for s in tag.contents
                             if isinstance(s, NavigableString) and s.strip()),
                            "")
                        if match := re.search(r'(\d+%)', str(first_text)):
                            values_for_row.append(match.group(1))
                    all_refine_values.append(values_for_row)

                # Build the template only when every refinement rank exposes
                # the same number of values; otherwise alignment is ambiguous.
                if all_refine_values and all(len(v) == len(all_refine_values[0]) for v in all_refine_values):
                    template = rows[0].find_all('td')[1].get_text(strip=True)
                    for i, val in enumerate(all_refine_values[0]):
                        template = template.replace(val, f'$[{i}]', 1)
                    data['affixData']['text'] = template

                    # Transpose rank-major rows into placeholder-major lists.
                    for i, val_list in enumerate(map(list, zip(*all_refine_values))):
                        data['affixData']['datas'][str(i)] = val_list
    return data

def main():
    """Parse every ``*.html`` file in the current directory and write each
    weapon's parsed data to ``<weapon name>/data.json``.

    Failures on individual files are logged (with traceback) and do not
    stop processing of the remaining files.
    """
    html_files = [f for f in os.listdir('.') if f.lower().endswith('.html')]
    if not html_files:
        print("当前目录下没有找到 HTML 文件。")
        return
    for input_html_file in html_files:
        try:
            weapon_data = parse_weapon_html(input_html_file)
            # FIX: fall back when the name is missing OR empty — the parser
            # always sets the 'name' key, so the old .get() default never
            # fired, and an empty name would make makedirs('') fail.
            weapon_name = weapon_data.get('name') or '未知武器'
            # Strip path separators so the name is a safe directory name.
            safe_weapon_name = weapon_name.replace('/', '_').replace('\\', '_')
            # exist_ok avoids the check-then-create race of os.path.exists.
            os.makedirs(safe_weapon_name, exist_ok=True)
            output_json_path = os.path.join(safe_weapon_name, 'data.json')
            with open(output_json_path, 'w', encoding='utf-8') as f:
                json.dump(weapon_data, f, ensure_ascii=False, indent=2)
            print(f"成功解析 '{input_html_file}'，数据已保存到 '{output_json_path}'")
        except Exception as e:
            # Best-effort batch: report and continue with the next file.
            print(f"解析 '{input_html_file}' 时出错: {e}")
            traceback.print_exc()
    print("所有数据已按武器名分别保存到对应文件夹的 data.json 文件中。")

if __name__ == '__main__':
    main()