#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import json
from html import unescape

def extract_website_data(html_file):
    """Extract website navigation data from a PICKFREE-style HTML file.

    Parses ``<h3>`` category headings and website panel ``<a>`` blocks out
    of the raw HTML with regular expressions, then groups the sites under
    the nearest preceding category heading.

    Args:
        html_file: Path to the HTML file; read as UTF-8.

    Returns:
        dict mapping category title -> list of site dicts, each with keys
        "url", "title", "description", "cover_image", "badges", "category".
    """

    # Read the HTML file.
    with open(html_file, 'r', encoding='utf-8') as f:
        content = f.read()

    # Category heading patterns, most specific first.
    # NOTE: the first two patterns capture three groups (id, title,
    # subtitle); the last has NO id group and yields 2-tuples.
    category_patterns = [
        r'<h3[^>]*id="([^"]+)"[^>]*>\s*&nbsp;\s*([^&<]+)(?:\s*&amp;\s*([^<]+))?\s*</h3>',
        r'<h3[^>]*id="([^"]+)"[^>]*>([^&<]+)(?:\s*&amp;\s*([^<]+))?</h3>',
        r'<h3[^>]*>([^&<]+)(?:\s*&amp;\s*([^<]+))?</h3>'
    ]

    categories = []
    for pattern in category_patterns:
        cats = re.findall(pattern, content)
        if cats:
            categories.extend(cats)

    # Known category titles, used to pre-seed the result dict so these
    # categories appear in the output even when empty.
    known_categories = [
        ("免版权图库 & 免扣素材", "免版权图库 & 免扣素材"),
        ("在线生成酷炫背景", "在线生成酷炫背景"),
        ("平铺纹理素材", "平铺纹理素材"),
        ("免版权字体", "免版权字体"),
        ("免版权音效", "免版权音效"),
        ("免版权音乐", "免版权音乐"),
        ("免版权视频", "免版权视频"),
        ("免版权图标 & 插画", "免版权图标 & 插画")
    ]

    # Map h3 id -> full category title.
    category_map = {}

    # Seed with the known categories (synthetic ids never match real h3
    # ids; they only guarantee the titles exist in the result).
    for i, cat_title in enumerate(known_categories):
        category_map[f"category_{i}"] = cat_title[0]

    for cat in categories:
        # BUG FIX: the third (id-less) pattern yields 2-tuples, and
        # unpacking them into (cat_id, title, subtitle) raised ValueError.
        # Matches without an id are skipped -- the positional lookup below
        # can only resolve headings that carry an id attribute anyway.
        if len(cat) != 3:
            continue
        cat_id, title, subtitle = cat
        full_title = title.strip()
        if subtitle:
            full_title += f" & {subtitle.strip()}"
        category_map[cat_id] = full_title
        print(f"分类 ID: {cat_id}, 标题: {full_title}")

    # Extract every website panel: href, cover image, title, description,
    # and the trailing inner HTML (badges live there).
    website_pattern = r'<a href="([^"]+)"\s+target="_blank">\s*<div class="uk-panel uk-panel-box webpanel uk-panel-hover">\s*<img src="([^"]+)"[^>]*>\s*<h4>([^<]+)</h4>\s*<p>([^<]+)</p>(.*?)</div>\s*</a>'
    websites = re.findall(website_pattern, content, re.DOTALL)

    print(f"\n找到 {len(websites)} 个网站")

    result = {}

    # Initialize every known/parsed category with an empty list.
    for cat_id, cat_title in category_map.items():
        result[cat_title] = []

    for i, (url, img, title, desc, badge_html) in enumerate(websites):
        # Extract badge/tag labels from the panel's trailing HTML.
        badge_pattern = r'<div class="uk-badge[^>]*>([^<]+)</div>'
        badges = re.findall(badge_pattern, badge_html)
        badges = [badge.strip() for badge in badges if badge.strip()]

        # Determine the category by locating this site in the HTML and
        # finding the nearest preceding <h3 id="..."> heading.
        # NOTE: find() returns the FIRST occurrence, so duplicate titles
        # would all resolve to the first panel's position.
        website_marker = f'<h4>{title}</h4>'
        pos = content.find(website_marker)
        if pos == -1:
            # Fall back to locating the panel by its href.
            website_marker = f'href="{url}"'
            pos = content.find(website_marker)

        current_category = "未分类"
        if pos != -1:
            # Last <h3 id="..."> before this position wins.
            before_content = content[:pos]
            h3_matches = list(re.finditer(r'<h3[^>]*id="([^"]+)"', before_content))
            if h3_matches:
                last_h3_id = h3_matches[-1].group(1)
                if last_h3_id in category_map:
                    current_category = category_map[last_h3_id]

        website_data = {
            "url": url.strip(),
            "title": title.strip(),
            "description": desc.strip(),
            "cover_image": img.strip(),
            "badges": badges,
            "category": current_category
        }

        if current_category not in result:
            result[current_category] = []

        result[current_category].append(website_data)

        if i < 5:  # Debug output for the first 5 sites only.
            print(f"\n网站 {i+1}:")
            print(f"  标题: {title}")
            print(f"  网址: {url}")
            print(f"  分类: {current_category}")
            print(f"  标签: {badges}")

    return result

def main(html_file='g:/claude-code-repo/design-nav/ilovefree/PICKFREE - 免费资源导航_免版权图片_免费商用字体_免费音频、视频等设计素材下载.html',
         output_file='g:/claude-code-repo/design-nav/website_data.json'):
    """Extract website data from *html_file* and save it as JSON.

    Prints progress, per-category counts, and a short preview of the
    extracted data.

    Args:
        html_file: Path to the source HTML file. Defaults to the original
            hard-coded location (generalized into a parameter so the
            script is reusable; ``main()`` behaves exactly as before).
        output_file: Path of the JSON file to write (UTF-8, non-ASCII
            preserved, 2-space indent).
    """
    print("开始提取网站数据...")
    data = extract_website_data(html_file)

    # Per-category site counts.
    print("\n分类统计:")
    for category, websites in data.items():
        print(f"  {category}: {len(websites)} 个网站")

    # Persist as JSON.
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)

    print(f"\n数据已保存到: {output_file}")

    # Preview a small slice of the data.
    print("\n数据预览:")
    for category, websites in list(data.items())[:2]:  # first 2 categories
        if websites:
            print(f"\n=== {category} ===")
            for website in websites[:2]:  # first 2 sites per category
                print(f"标题: {website['title']}")
                print(f"网址: {website['url']}")
                print(f"描述: {website['description'][:50]}...")
                print(f"标签: {', '.join(website['badges'])}")
                print("-" * 50)

if __name__ == "__main__":
    main()