#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import json
from html import unescape

def extract_website_data_v2(html_file):
    """Extract website navigation data from an HTML export (improved version).

    Locates the known ``<h3>`` category headings, scrapes every website panel
    (an ``<a>`` wrapping a "webpanel" ``<div>``), and assigns each site to the
    nearest category heading that precedes it in the document.

    Args:
        html_file: Path to a UTF-8 encoded HTML file.

    Returns:
        dict mapping category title -> list of site dicts with keys
        "url", "title", "description", "cover_image", "badges" and
        "category".  Sites whose position in the document cannot be
        determined land in the fallback "其他" bucket.
    """
    with open(html_file, 'r', encoding='utf-8') as f:
        content = f.read()

    # All category headings known to appear on the page (document order).
    known_categories = [
        "免版权图库 & 免扣素材",
        "在线生成酷炫背景",
        "平铺纹理素材",
        "免版权字体",
        "免版权音效",
        "免版权音乐",
        "免版权视频",
        "免版权图标 & 插画"
    ]

    print("查找分类位置...")

    # Locate every heading in the raw HTML.  Several patterns are tried
    # because the markup varies (with/without id attribute, optional leading
    # &nbsp;).  The same heading often matches more than one pattern, so
    # (position, category) pairs are de-duplicated.
    category_positions = []
    seen_headings = set()
    for category in known_categories:
        patterns = [
            rf'<h3[^>]*id="[^"]*"[^>]*>[^&]*{re.escape(category)}[^<]*</h3>',
            rf'<h3[^>]*>[^&]*{re.escape(category)}[^<]*</h3>',
            rf'<h3[^>]*>\s*&nbsp;\s*[^&]*{re.escape(category)}[^<]*</h3>'
        ]
        for pattern in patterns:
            for match in re.finditer(pattern, content, re.IGNORECASE):
                key = (match.start(), category)
                if key in seen_headings:
                    continue  # already recorded via an earlier pattern
                seen_headings.add(key)
                category_positions.append(key)
                print(f"  找到分类 '{category}' 在位置 {match.start()}")

    # Sort headings by document position so the "last heading before this
    # site" scan below works.
    category_positions.sort(key=lambda x: x[0])
    print(f"总共找到 {len(category_positions)} 个分类位置")

    # Panel patterns: strict first, then a looser fallback.  A panel usually
    # matches BOTH patterns, so results are de-duplicated by (url, title) —
    # the previous unconditional extend() recorded every such site twice.
    website_patterns = [
        r'<a href="([^"]+)"\s+target="_blank">\s*<div class="uk-panel uk-panel-box webpanel uk-panel-hover">\s*<img src="([^"]+)"[^>]*>\s*<h4>([^<]+)</h4>\s*<p>([^<]+)</p>(.*?)</div>\s*</a>',
        r'<a href="([^"]+)"[^>]*target="_blank">\s*<div class="uk-panel[^>]*webpanel[^>]*">\s*<img src="([^"]+)"[^>]*>\s*<h4>([^<]+)</h4>\s*<p>([^<]+)</p>(.*?)</div>\s*</a>'
    ]

    websites = []
    seen_sites = set()
    for pattern in website_patterns:
        for entry in re.findall(pattern, content, re.DOTALL):
            key = (entry[0], entry[2])  # (url, title) identifies one panel
            if key not in seen_sites:
                seen_sites.add(key)
                websites.append(entry)

    print(f"找到 {len(websites)} 个网站")

    result = {category: [] for category in known_categories}
    result["其他"] = []  # fallback bucket for sites that cannot be placed

    # Badge pattern is loop-invariant; hoist it out of the per-site loop.
    badge_pattern = r'<div class="uk-badge[^>]*>([^<]+)</div>'

    for i, (url, img, title, desc, badge_html) in enumerate(websites):
        # Badges/labels inside the trailing panel HTML.
        badges = [unescape(b).strip() for b in re.findall(badge_pattern, badge_html)]
        badges = [b for b in badges if b]

        # Determine the site's position in the raw HTML: first anchor that
        # can be found (title tag, then href, then image URL).
        positions_to_check = [
            content.find(f'<h4>{title}</h4>'),
            content.find(f'href="{url}"'),
            content.find(img)
        ]
        pos = next((p for p in positions_to_check if p != -1), -1)

        current_category = "其他"  # default when the site cannot be located

        if pos != -1:
            # The site belongs to the closest heading that precedes it.
            last_category = known_categories[0] if known_categories else "其他"
            for cat_pos, cat_title in category_positions:
                if cat_pos < pos:
                    last_category = cat_title
                else:
                    break
            current_category = last_category

        website_data = {
            "url": url.strip(),
            # unescape() turns HTML entities (e.g. &amp;) into plain text;
            # the import existed before but was never applied.
            "title": unescape(title).strip(),
            "description": unescape(desc).strip(),
            "cover_image": img.strip(),
            "badges": badges,
            "category": current_category
        }

        result[current_category].append(website_data)

        if i < 10:  # show the first 10 entries for debugging
            print(f"\n网站 {i+1}:")
            print(f"  标题: {title}")
            print(f"  网址: {url}")
            print(f"  分类: {current_category}")
            print(f"  标签: {badges}")

    return result

def main():
    """Entry point: extract the navigation data from the saved HTML page,
    report per-category statistics, persist the non-empty categories as
    JSON, and print a one-site preview per category."""
    html_file = 'g:/claude-code-repo/design-nav/ilovefree/PICKFREE - 免费资源导航_免版权图片_免费商用字体_免费音频、视频等设计素材下载.html'

    print("开始提取网站数据...")
    data = extract_website_data_v2(html_file)

    # Keep only categories that actually received sites; the same filtered
    # view drives the statistics, the JSON output and the preview.
    populated = {name: sites for name, sites in data.items() if sites}

    print("\n分类统计:")
    for name, sites in populated.items():
        print(f"  {name}: {len(sites)} 个网站")
    total_sites = sum(len(sites) for sites in populated.values())

    print(f"\n总计: {total_sites} 个网站")

    output_file = 'g:/claude-code-repo/design-nav/website_data_complete.json'
    with open(output_file, 'w', encoding='utf-8') as out:
        json.dump(populated, out, ensure_ascii=False, indent=2)

    print(f"数据已保存到: {output_file}")

    # Show the first site of each populated category.
    print("\n分类预览:")
    for name, sites in populated.items():
        print(f"\n=== {name} ({len(sites)} 个网站) ===")
        first = sites[0]
        print(f"标题: {first['title']}")
        print(f"网址: {first['url']}")
        print(f"描述: {first['description'][:60]}...")
        print(f"标签: {', '.join(first['badges'])}")
        print(f"封面: {first['cover_image'].split('/')[-1]}")  # basename only

# Run the extraction only when executed as a script (not on import).
if __name__ == "__main__":
    main()