#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Comprehensive extraction of all navigation data from PICKFREE HTML file
This script will systematically parse through the entire HTML file and extract all website entries
"""

import json
import re
from typing import Any, Dict, List, Optional

from bs4 import BeautifulSoup

def extract_comprehensive_data(html_file_path: str) -> Dict[str, Any]:
    """
    Parse the saved PICKFREE navigation HTML and extract every category
    with its website entries.

    Args:
        html_file_path: Path to the saved PICKFREE HTML page (UTF-8).

    Returns:
        A dict with a "categories" list (each item has name/id/description/
        sites) and a "metadata" summary block (totals, source, date).
    """

    # Read the HTML file with proper encoding
    with open(html_file_path, 'r', encoding='utf-8') as file:
        html_content = file.read()

    soup = BeautifulSoup(html_content, 'html.parser')

    # Known section anchors in the page. The first section has no <h3>
    # heading; its anchor is the <ul id="tuku"> element itself.
    categories_info = [
        {"id": "tuku", "name": "免版权图库 & 免扣素材", "has_h3": False},
        {"id": "beijing", "name": "在线生成酷炫背景", "has_h3": True},
        {"id": "wenli", "name": "平铺纹理素材", "has_h3": True},
        {"id": "font", "name": "免版权字体", "has_h3": True},
        {"id": "yinxiao", "name": "免版权音效", "has_h3": True},
        {"id": "yinyue", "name": "免版权音乐", "has_h3": True},
        {"id": "shipin", "name": "免版权视频", "has_h3": True},
        {"id": "tubiao", "name": "免版权图标 & 插画", "has_h3": True},
        {"id": "gongju", "name": "在线工具，提高你的效率", "has_h3": True},
        {"id": "xuexi", "name": "学习", "has_h3": True},
        {"id": "xinxian", "name": "新鲜", "has_h3": True}
    ]

    extracted_categories = []

    for i, category_info in enumerate(categories_info):
        category_id = category_info["id"]
        category_name = category_info["name"]
        has_h3 = category_info["has_h3"]

        print(f"Processing category: {category_name}")

        # Locate the anchor element where this category's section starts.
        if has_h3:
            start_element = soup.find('h3', {'id': category_id})
        else:
            # Special case for the first category: anchored by its <ul>.
            start_element = soup.find('ul', {'id': category_id})

        if not start_element:
            print(f"Warning: Category {category_name} not found in HTML")
            continue

        # The section ends at the next category's anchor; the last section
        # runs to the end of the document (end_element stays None).
        if i < len(categories_info) - 1:
            next_info = categories_info[i + 1]
            anchor_tag = 'h3' if next_info["has_h3"] else 'ul'
            end_element = soup.find(anchor_tag, {'id': next_info["id"]})
        else:
            end_element = None

        # Walk the document in order, collecting entry divs until the next
        # section anchor is reached.
        websites = []
        current_element = start_element.find_next()

        while current_element and current_element != end_element:
            element_classes = current_element.get('class', [])
            # A website entry is any grid-cell div carrying one of the
            # known width classes. NOTE: the original also tested
            # ('uk-width-medium-1-3' and 'xiaotu') / ('uk-width-medium-1-4'
            # and 'xiaotu') combinations, but those clauses were subsumed
            # by the bare width-class checks that followed them and were
            # therefore dead code — removed without behavior change.
            is_website_entry = (
                current_element.name == 'div' and (
                    'uk-width-medium-1-5' in element_classes or  # images/icons
                    'uk-width-medium-1-3' in element_classes or  # audio/music/video
                    'uk-width-medium-1-4' in element_classes     # tools/learning/fresh
                )
            )

            if is_website_entry:
                website_data = extract_website_data(current_element)
                if website_data:
                    websites.append(website_data)

            current_element = current_element.find_next()

        if websites:
            category_data = {
                "name": category_name,
                "id": category_id,
                "description": get_category_description(category_name),
                "sites": websites
            }
            extracted_categories.append(category_data)
            print(f"  Found {len(websites)} websites in {category_name}")
        else:
            print(f"  No websites found in {category_name}")

    # Assemble the final JSON-serializable structure.
    result = {
        "categories": extracted_categories,
        "metadata": {
            "total_sites": sum(len(cat["sites"]) for cat in extracted_categories),
            "total_categories": len(extracted_categories),
            "last_updated": "2025-01-07",
            "source": "PICKFREE - 免费资源导航",
            "description": "免费可商用的设计素材资源导航网站数据 - 完整版"
        }
    }

    return result

def extract_website_data(div_element) -> Optional[Dict[str, Any]]:
    """
    Extract the metadata for a single website entry.

    Args:
        div_element: BeautifulSoup Tag for one grid-cell <div> wrapping a
            single website card.

    Returns:
        A dict with keys url/title/description/cover_image/badges/
        filter_tags, or None when the div is not a recognizable entry
        (no outbound link or no panel) or parsing raises.
        (Fix: the original annotated the return as ``Dict[str, Any]``
        even though ``None`` is returned on several paths.)
    """
    try:
        # Every entry links out with target="_blank"; no link → not an entry.
        link = div_element.find('a', {'target': '_blank'})
        if not link:
            return None

        url = link.get('href', '')

        # All visible card content lives inside the uk-panel container.
        panel = div_element.find('div', class_='uk-panel')
        if not panel:
            return None

        # Cover image (may legitimately be absent).
        img = panel.find('img')
        cover_image = img.get('src', '') if img else ''

        # Title (h4 tag) and description (p tag).
        title_elem = panel.find('h4')
        title = title_elem.get_text(strip=True) if title_elem else ''

        desc_elem = panel.find('p')
        description = desc_elem.get_text(strip=True) if desc_elem else ''

        # Grey badge chips carry free-form tags (skip empty ones).
        badges = []
        for badge in panel.find_all('div', class_='uk-badge-hui'):
            badge_text = badge.get_text(strip=True)
            if badge_text:
                badges.append(badge_text)

        # Comma-separated filter tags from the data-uk-filter attribute.
        filter_tags = div_element.get('data-uk-filter', '').split(',')
        filter_tags = [tag.strip() for tag in filter_tags if tag.strip()]

        return {
            "url": url,
            "title": title,
            "description": description,
            "cover_image": cover_image,
            "badges": badges,
            "filter_tags": filter_tags
        }

    except Exception as e:
        # Defensive: malformed markup should skip this entry, not abort
        # the whole extraction run.
        print(f"Error extracting website data: {e}")
        return None

def get_category_description(category_name: str) -> str:
    """
    Return the canned Chinese description for a category display name.

    Unknown names yield an empty string.
    """
    known_descriptions = (
        ("免版权图库 & 免扣素材", "免费可商用的图片资源和免扣素材"),
        ("在线生成酷炫背景", "在线生成各种炫酷背景的工具"),
        ("平铺纹理素材", "平铺纹理和背景素材资源"),
        ("免版权字体", "免费可商用的中英文字体资源"),
        ("免版权音效", "免费可商用的音效资源"),
        ("免版权音乐", "免费可商用的音乐资源"),
        ("免版权视频", "免费可商用的视频素材资源"),
        ("免版权图标 & 插画", "免费可商用的图标和插画资源"),
        ("在线工具，提高你的效率", "提高设计效率的在线工具"),
        ("学习", "设计学习和教程资源"),
        ("新鲜", "最新发现的设计资源和工具"),
    )
    for name, description in known_descriptions:
        if name == category_name:
            return description
    return ""

def main():
    """
    Entry point: run the full extraction, write the JSON output file,
    and print a per-category summary.
    """
    html_file = "g:/claude-code-repo/design-nav/ilovefree/PICKFREE - 免费资源导航_免版权图片_免费商用字体_免费音频、视频等设计素材下载.html"
    output_file = "g:/claude-code-repo/design-nav/ilovefree/sites_complete.json"

    print("Starting comprehensive extraction...")
    print(f"Input file: {html_file}")

    result = extract_comprehensive_data(html_file)

    # Persist the full structure as pretty-printed, non-ASCII-escaped JSON.
    with open(output_file, 'w', encoding='utf-8') as sink:
        json.dump(result, sink, ensure_ascii=False, indent=2)

    metadata = result['metadata']
    print("\nExtraction completed!")
    print(f"Total categories found: {metadata['total_categories']}")
    print(f"Total websites found: {metadata['total_sites']}")
    print(f"Output saved to: {output_file}")

    # Per-category counts, one line each.
    summary = [
        f"  {category['name']}: {len(category['sites'])} websites"
        for category in result['categories']
    ]
    print("\nSummary by category:")
    print("\n".join(summary))

# Run the extraction only when executed as a script, not on import.
if __name__ == "__main__":
    main()