import asyncio
import json
import time
from playwright.sync_api import sync_playwright
from bs4 import BeautifulSoup
import random
import csv
import os
import re

# Districts of Wuhan to crawl and the property-type ("asset") codes
# substituted into BASE_URL's {asset} slot.
AREAS = [
    "jiangan", "jianghan", "qiaokou", "dongxihuqu", "wuchang", 
    "qingshan", "hongshan", "hanyang", "donghugaoxin", "jiangxia"
]
ASSETS = ["3", "4"]
# Listing-page URL template: {area} = district slug, {pageNum} = 1-based page
# number, {asset} = property-type code; the other path segments are fixed filters.
BASE_URL = "https://wh.ke.com/ershoufang/{area}/pg{pageNum}dp1sf1y2l3l4l5l6p{asset}/"
# Login page address; adjust to your actual situation if needed.
LOGIN_URL = "https://wh.ke.com"

def get_page_content(page, url):
    """
    Fetch one listing page with Playwright and return its HTML.

    :param page: Playwright page object (already logged in)
    :param url: URL to request
    :return: page HTML content, or None when navigation/waiting fails
    """
    try:
        page.goto(url, wait_until='networkidle', timeout=15000)

        # BUG FIX: check for a captcha BEFORE waiting for the pagination
        # element. On a captcha page '.house-lst-page-box' never appears,
        # so the original order timed out in wait_for_selector and the
        # manual-solve branch below was unreachable.
        # NOTE(review): 'selector_for_captcha_element' is a placeholder —
        # replace it with the site's real captcha selector.
        if page.locator('selector_for_captcha_element').count() > 0:
            print("触发了人机验证，请在浏览器中手动完成验证，完成后按回车键继续...")
            input()
            # Reload the page after the manual verification.
            page.goto(url, wait_until='networkidle', timeout=15000)

        # Explicitly wait for the pagination box; up to 10 seconds.
        page.wait_for_selector('.house-lst-page-box', timeout=10000)
        # Random 2-6 s pause to mimic human browsing behavior.
        time.sleep(random.uniform(2, 6))

        return page.content()
    except Exception as e:
        print(f"请求 {url} 出错: {e}")
        return None

def extract_house_info(html_content):
    """
    Parse a listing page's HTML and return one dict per house entry.

    :param html_content: page HTML content (may be None/empty)
    :return: list of house-info dicts; entries without a URL are dropped
    """
    if not html_content:
        return []

    soup = BeautifulSoup(html_content, 'html.parser')
    # Floor description looks like "中楼层(共33层)"; group 1 is the level,
    # group 2 the total-floor count.
    floor_re = re.compile(r'([高低中]楼层)\s*\((共\d+层)\)')
    results = []

    for node in soup.select('ul.sellListContent li.clear'):
        record = {}

        link = node.select_one('.title a')
        if link is not None:
            record['title'] = link.get('title', '').strip()
            record['url'] = link.get('href', '')

        position = node.select_one('.positionInfo a')
        if position is not None:
            record['community'] = position.get_text(strip=True)

        info_node = node.select_one('.houseInfo')
        if info_node is not None:
            info_text = info_node.get_text(strip=True)

            # Pull out the floor description first.
            m = floor_re.search(info_text)
            record['floor_level'] = m.group(1) if m else ''
            record['total_floors'] = m.group(2) if m else ''

            # Drop the floor part, then split what remains on '|' to get
            # layout / area / orientation.
            fields = re.split(r'\s*\|\s*', floor_re.sub('', info_text).strip())
            for key, idx in (('bedroom', 0), ('area', 1), ('direction', 2)):
                record[key] = fields[idx].strip() if idx < len(fields) else ''

        follow = node.select_one('.followInfo')
        if follow is not None:
            pieces = [s.strip() for s in follow.get_text(strip=True).split('/') if s.strip()]
            if len(pieces) == 2:
                record['followers'], record['list_date'] = pieces

        record['tags'] = [t.get_text(strip=True) for t in node.select('.tag span')]

        total = node.select_one('.totalPrice span')
        if total is not None:
            record['total_price'] = total.get_text(strip=True) + "万"

        unit = node.select_one('.unitPrice span')
        if unit is not None:
            record['unit_price'] = unit.get_text(strip=True)

        # Keep only entries that carried a detail-page URL.
        if record.get('url'):
            results.append(record)

    return results

def is_last_page(html_content):
    """
    Decide whether the current page is the last page of results.

    :param html_content: page HTML content, or None when the fetch failed
    :return: True when pagination should stop, False otherwise
    """
    if html_content is None:
        return True
    soup = BeautifulSoup(html_content, 'html.parser')
    page_box = soup.find('div', class_='house-lst-page-box')
    if page_box:
        # The pagination widget stores its state as JSON in the
        # 'page-data' attribute, e.g. {"totalPage":42,"curPage":3}.
        page_data_str = page_box.get('page-data')
        if page_data_str:
            try:
                page_data = json.loads(page_data_str)
                total_page = page_data.get('totalPage', 1)
                cur_page = page_data.get('curPage', 1)
                return cur_page >= total_page
            except json.JSONDecodeError:
                pass
    # BUG FIX: the original returned False when the pagination box or its
    # JSON was missing/invalid, which made the caller's while-loop paginate
    # forever. With no usable pagination data, treat the page as the last.
    return True

def save_to_csv(area, houses):
    """
    Append house records for one area to csv/<area>_houses.csv.

    :param area: area name, used in the output file name
    :param houses: list of house-info dicts (as built by extract_house_info)
    """
    csv_dir = 'csv'
    # exist_ok avoids a race between an existence check and makedirs.
    os.makedirs(csv_dir, exist_ok=True)

    csv_file = os.path.join(csv_dir, f'{area}_houses.csv')
    # BUG FIX: the original fieldnames omitted the houseInfo-derived keys
    # (floor_level, total_floors, bedroom, area, direction) produced by
    # extract_house_info, so DictWriter.writerow raised
    # "ValueError: dict contains fields not in fieldnames" on such rows.
    fieldnames = [
        'title', 'url', 'community',
        'floor_level', 'total_floors', 'bedroom', 'area', 'direction',
        'followers', 'list_date', 'tags', 'total_price', 'unit_price',
    ]
    file_exists = os.path.isfile(csv_file)

    # utf-8-sig writes a BOM so Excel displays Chinese text correctly.
    with open(csv_file, 'a', newline='', encoding='utf-8-sig') as f:
        # extrasaction='ignore' tolerates any future extra keys instead
        # of crashing mid-crawl.
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        if not file_exists:
            writer.writeheader()
        for house in houses:
            writer.writerow(house)

def main():
    """
    Entry point: open a visible browser, let the user log in manually,
    then crawl every (area, asset) combination page by page and write
    one CSV file per area.
    """
    with sync_playwright() as playwright:
        # Web-security/origin-isolation flags are disabled as in the
        # original launch configuration.
        browser = playwright.chromium.launch(
            headless=False,
            args=[
                "--disable-web-security",
                "--disable-features=IsolateOrigins,site-per-process"
            ]
        )
        context = browser.new_context(
            user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            extra_http_headers={
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                'Accept-Language': 'zh-CN,zh;q=0.9',
                'Cache-Control': 'max-age=0',
                'Connection': 'keep-alive',
            }
        )
        page = context.new_page()

        # Manual login step: the site requires an authenticated session.
        page.goto(LOGIN_URL)
        print("Please complete the login operation in the pop-up browser window.")
        input("After logging in, please press Enter here to continue...")

        for area in AREAS:
            collected = []
            for asset in ASSETS:
                page_no = 1
                done = False
                while not done:
                    url = BASE_URL.format(area=area, pageNum=page_no, asset=asset)
                    print(f"Requesting {url}")
                    html = get_page_content(page, url)
                    if html:
                        collected.extend(extract_house_info(html))
                    done = is_last_page(html)
                    page_no += 1
            # Persist once per area, after all its pages are crawled.
            save_to_csv(area, collected)

        browser.close()
        

# Script entry point: run the crawler only when executed directly.
if __name__ == "__main__":
    main()