import json
from bs4 import BeautifulSoup
import re

# Floor segment of the houseInfo text, e.g. "中楼层(共6层)".
# Compiled once at module level instead of per listing inside the loop.
_FLOOR_PATTERN = re.compile(r'([高低中]楼层)\s*\((共\d+层)\)')

# Separator between the fields of the houseInfo text, e.g. "2室1厅 | 89平米 | 南".
_FIELD_SEPARATOR = re.compile(r'\s*\|\s*')


def _parse_house_info(house: dict, info_text: str) -> None:
    """Extract floor level, layout, area, and direction from the houseInfo text.

    Mutates *house* in place, always setting 'floor_level' and 'total_floors'
    (empty strings when no floor segment is present) and setting 'bedroom',
    'area', and 'direction' from the remaining '|'-separated fields.
    """
    floor_match = _FLOOR_PATTERN.search(info_text)
    house['floor_level'] = floor_match.group(1) if floor_match else ''
    house['total_floors'] = floor_match.group(2) if floor_match else ''

    # Remove the floor segment, then split on '|'. Removing a middle segment
    # leaves an empty fragment between doubled separators, so drop empties to
    # keep the positional fields (layout/area/direction) aligned.
    remaining = _FLOOR_PATTERN.sub('', info_text).strip()
    details = [part for part in _FIELD_SEPARATOR.split(remaining) if part]

    house['bedroom'] = details[0].strip() if len(details) > 0 else ''
    house['area'] = details[1].strip() if len(details) > 1 else ''
    house['direction'] = details[2].strip() if len(details) > 2 else ''


def _parse_listing_item(item) -> dict:
    """Extract all fields of interest from a single <li> listing element.

    Returns a dict; keys are present only when the corresponding element was
    found, except 'tags', which is always present (possibly empty).
    """
    house = {}

    # Title and URL
    title_tag = item.select_one('.title a')
    if title_tag:
        house['title'] = title_tag.get('title', '').strip()
        house['url'] = title_tag.get('href', '')

    # Community/Neighborhood
    position_tag = item.select_one('.positionInfo a')
    if position_tag:
        house['community'] = position_tag.get_text(strip=True)

    # House info (layout, area, direction, floor)
    house_info_tag = item.select_one('.houseInfo')
    if house_info_tag:
        _parse_house_info(house, house_info_tag.get_text(strip=True))

    # Follow info ("<n>人关注 / <date>") — only stored when exactly two parts.
    follow_info_tag = item.select_one('.followInfo')
    if follow_info_tag:
        follow_text = follow_info_tag.get_text(strip=True)
        parts = [p.strip() for p in follow_text.split('/') if p.strip()]
        if len(parts) == 2:
            house['followers'], house['list_date'] = parts

    # Tags (e.g., "满五年", "VR房源")
    house['tags'] = [tag.get_text(strip=True) for tag in item.select('.tag span')]

    # Price info
    total_price_tag = item.select_one('.totalPrice span')
    if total_price_tag:
        house['total_price'] = total_price_tag.get_text(strip=True) + "万"

    unit_price_tag = item.select_one('.unitPrice span')
    if unit_price_tag:
        house['unit_price'] = unit_price_tag.get_text(strip=True)

    return house


def parse_house_listings(file_path: str):
    """Parse an HTML file of house listings and print/return the extracted data.

    Args:
        file_path: The path to the HTML file (e.g., "list.html").

    Returns:
        A list of dicts, one per listing that yielded a URL (possibly empty),
        or None when the file is missing or no listings were found.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
    except FileNotFoundError:
        print(f"Error: The file '{file_path}' was not found.")
        return None

    soup = BeautifulSoup(content, 'html.parser')

    # Find all list items: <li class="clear"> within <ul class="sellListContent">.
    list_items = soup.select('ul.sellListContent li.clear')
    if not list_items:
        print("No house listings found using the selector 'ul.sellListContent li.clear'.")
        return None

    print(f"Found {len(list_items)} house listings. Extracting details...")

    # Keep only listings for which a URL was successfully extracted.
    houses_data = [
        house
        for house in (_parse_listing_item(item) for item in list_items)
        if house.get('url')
    ]

    # --- Print the final structured data ---
    if houses_data:
        print("\n--- Extracted House Data ---")
        # json.dumps with ensure_ascii=False keeps the Chinese text readable.
        print(json.dumps(houses_data, indent=2, ensure_ascii=False))
    else:
        print("Could not extract any valid house data.")

    return houses_data


if __name__ == "__main__":
    # Removed the unused `detail_file` variable; only the listing page is parsed.
    html_file = "list.html"
    parse_house_listings(html_file)
