import asyncio
import csv
import time
import random
import sys
import os
import traceback
import argparse
import re
import json
import logging
import asyncio
from datetime import datetime
from pathlib import Path
from playwright.async_api import Playwright, async_playwright, expect

# 添加项目根目录到系统路径，以便导入utils模块
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 页码持久化文件路径
PAGE_STATE_FILE = "page_state.json"

# 读取上次爬取的页码
def load_page_state():
    """Read the page number persisted by a previous crawl; default to 1."""
    try:
        if not os.path.exists(PAGE_STATE_FILE):
            return 1
        with open(PAGE_STATE_FILE, "r", encoding="utf-8") as f:
            state = json.load(f)
        return state.get("current_page", 1)
    except Exception as e:
        # Best-effort: a corrupt/unreadable state file simply restarts at page 1.
        print(f"读取页码状态文件出错: {e}")
        return 1

# 保存当前爬取的页码
def save_page_state(current_page):
    """Persist the current page number so a later run can resume from it.

    Returns True on success, False if the state file could not be written.
    """
    try:
        payload = {"current_page": current_page}
        with open(PAGE_STATE_FILE, "w", encoding="utf-8") as f:
            json.dump(payload, f)
        print(f"已保存当前页码 {current_page} 到状态文件")
        return True
    except Exception as e:
        print(f"保存页码状态文件出错: {e}")
        return False

# 加载爬虫配置
def load_spider_config():
    """Load spider settings from <project-root>/config/spider.json.

    Returns the parsed JSON dict, or the built-in defaults when the file is
    missing or unreadable. Defaults: wait 60s between anti-crawl retries,
    retry at most 3 times.
    """
    # Single source of truth for the fallback config (previously duplicated).
    default_config = {"retry_wait_seconds": 60, "max_retry_times": 3}
    config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "config", "spider.json")
    try:
        if os.path.exists(config_path):
            with open(config_path, "r", encoding="utf-8") as f:
                return json.load(f)
        return default_config
    except Exception as e:
        print(f"读取爬虫配置文件出错: {e}")
        return default_config

async def run(playwright: Playwright, headless=True, debug_dir="../debug", current_page=None, city_pinyin="chongqing", area_pinyin="banan"):
    """Crawl one anjuke.com second-hand-house listing page and persist results.

    Scrapes every listing card on the page, writes them to a timestamped CSV
    (and to MySQL via utils.mysql_handler when importable), and returns a
    status dict: {"success": bool, "has_next_page": bool, "next_page": int
    (present only when has_next_page is True)}.

    NOTE: the debug_dir parameter is shadowed below by a date-based folder;
    it is kept only for signature compatibility with existing callers.
    """
    # Retry policy applied when the site's anti-crawler page is detected.
    spider_config = load_spider_config()
    retry_wait_seconds = spider_config.get("retry_wait_seconds", 60)
    max_retry_times = spider_config.get("max_retry_times", 3)

    # One timestamp shared by the CSV, log and debug files so they match up.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    date_folder = datetime.now().strftime('%Y%m%d')  # per-day sub-directory

    # Make sure the CSV / LOG / DEBUG directories (incl. date sub-dirs) exist.
    csv_dir = f"../csv/{date_folder}"
    log_dir = f"../log/{date_folder}"
    debug_dir = f"../debug/{date_folder}"
    Path(csv_dir).mkdir(parents=True, exist_ok=True)
    Path(log_dir).mkdir(parents=True, exist_ok=True)
    Path(debug_dir).mkdir(parents=True, exist_ok=True)

    # BUGFIX: resolve the page number BEFORE building the URL and log file
    # name; previously current_page=None produced ".../pNone/" URLs.
    if current_page is None:
        current_page = load_page_state()

    # The log file follows the same naming scheme as the CSV file.
    start_url = f"https://{city_pinyin}.anjuke.com/sale/{area_pinyin}/p{current_page}/"
    log_filename = f"{log_dir}/{city_pinyin}_{area_pinyin}_{current_page}_{timestamp}.log"

    # Configure logging. force=True (Python 3.8+) replaces handlers installed
    # by a previous call; without it basicConfig is a no-op on every call
    # after the first, so later pages kept logging to the first page's file.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler(log_filename, encoding='utf-8'),
            logging.StreamHandler(sys.stdout)
        ],
        force=True
    )
    logger = logging.getLogger(__name__)

    browser = None
    context = None
    page = None
    success = False
    start_time = datetime.now()
    log_prefix = f"[{start_time.strftime('%H:%M:%S')}]"

    try:
        logger.info(f"{log_prefix} 启动浏览器...")
        # Launch the browser (optionally headless) with flags for stability.
        browser = await playwright.chromium.launch(
            headless=headless,
            args=[
                '--disable-gpu',
                '--disable-dev-shm-usage',
                '--disable-setuid-sandbox',
                '--no-sandbox',
                '--disable-extensions',
                '--disable-popup-blocking',
            ]
        )

        # Create a browser context with a desktop viewport and user agent.
        context = await browser.new_context(
            viewport={'width': 1920, 'height': 1080},
            user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
        )

        # Open a new page.
        page = await context.new_page()

        # Default timeout for all page operations.
        page.set_default_timeout(60000)  # 60 seconds

        logger.info(f"{log_prefix} 导航到安居客二手房页面...")
        logger.info(f"{log_prefix} 从页码 {current_page} 继续爬取")

        try:
            response = await page.goto(
                start_url,
                wait_until="domcontentloaded",
                timeout=60000
            )
            logger.info(f"{log_prefix} 页面响应状态: {response.status if response else '未知'}")
        except Exception as nav_error:
            # BUGFIX: use the logger (was print) so navigation failures also
            # reach the log file. Message text unchanged.
            logger.warning(f"{log_prefix} 导航到页面时出错: {nav_error}")
            logger.info(f"{log_prefix} 尝试使用备用方法导航...")
            await page.goto(
                 f"https://{city_pinyin}.anjuke.com/sale/{area_pinyin}/",
                 wait_until="commit",
                 timeout=60000
             )

        # Wait for the page to finish loading.
        logger.info(f"{log_prefix} 等待页面加载...")
        try:
            await page.wait_for_load_state("networkidle", timeout=30000)
            logger.info(f"{log_prefix} 页面网络活动已停止")
        except Exception as load_error:
            logger.warning(f"{log_prefix} 等待页面加载完成时出错: {load_error}")
            logger.info(f"{log_prefix} 继续执行，尝试等待DOM内容加载...")
            await asyncio.sleep(5)  # fall back to a fixed 5-second wait

        # The city comes from the command-line parameter, not from the page.
        logger.info(f"{log_prefix} 使用命令行参数指定的城市: {city_pinyin}")

        # BUGFIX: default the display city name so the per-house records below
        # never hit a NameError; the meta-tag lookup only refines this value.
        city_name = city_pinyin
        try:
            # Look for a meta tag carrying location information.
            location_meta = await page.query_selector('meta[name="location"]')
            if location_meta:
                content = await location_meta.get_attribute("content")
                if content:
                    # Parse the city name out of the content attribute.
                    if "city=" in content:
                        city_match = re.search(r'city=([^;]+)', content)
                        if city_match:
                            city_name = city_match.group(1)
                            logger.info(f"{log_prefix} 从meta标签提取到城市信息: {city_name}")
        except Exception as city_name_error:
            logger.warning(f"{log_prefix} 提取城市信息时出错: {city_name_error}")

        # Save the raw page HTML for debugging.
        logger.info(f"{log_prefix} 保存页面内容用于调试...")
        try:
            page_content = await page.content()
            # Detect the site's rate-limit / anti-crawler interstitial.
            if "访问过于频繁" in page_content:
                # Persist the current page so a later run can resume here if
                # all retries fail.
                save_page_state(current_page)
                logger.info(f"已保存当前页码 {current_page}，以便需要时从此页继续爬取")

                # Retry up to max_retry_times, sleeping between attempts.
                retry_count = 0
                while retry_count < max_retry_times:
                    retry_count += 1
                    logger.warning(f"检测到反爬策略：访问过于频繁。第 {retry_count}/{max_retry_times} 次重试，将等待 {retry_wait_seconds} 秒...")

                    # Back off for the configured interval.
                    logger.info(f"等待 {retry_wait_seconds} 秒后重试...")
                    await asyncio.sleep(retry_wait_seconds)

                    # Reload the page and re-check.
                    logger.info(f"第 {retry_count} 次重新加载页面...")
                    try:
                        await page.reload(wait_until="domcontentloaded", timeout=60000)
                        logger.info("页面重新加载完成，检查是否仍然被反爬...")

                        # Still blocked?
                        new_content = await page.content()
                        if "访问过于频繁" not in new_content:
                            logger.info("重试成功！继续爬取...")
                            # Retry succeeded: resume normal crawling.
                            break

                        # Last retry still blocked: give up and exit hard to
                        # avoid escalating the ban.
                        if retry_count >= max_retry_times:
                            logger.error(f"已重试 {max_retry_times} 次仍然被反爬，爬虫将停止运行以避免进一步封禁。")
                            # Release browser resources before exiting.
                            try:
                                if page:
                                    await page.close()
                                if context:
                                    await context.close()
                                if browser:
                                    await browser.close()
                            except Exception as close_error:
                                logger.warning(f"关闭浏览器资源时出错: {close_error}")
                            logger.info("爬虫程序即将退出...")
                            # Non-zero exit code signals abnormal termination.
                            sys.exit(1)
                    except Exception as reload_error:
                        logger.error(f"重新加载页面时出错: {reload_error}")
                        # Last retry errored: same hard-exit path as above.
                        if retry_count >= max_retry_times:
                            logger.error(f"已重试 {max_retry_times} 次均失败，爬虫将停止运行。")
                            # Release browser resources before exiting.
                            try:
                                if page:
                                    await page.close()
                                if context:
                                    await context.close()
                                if browser:
                                    await browser.close()
                            except Exception as close_error:
                                logger.warning(f"关闭浏览器资源时出错: {close_error}")
                            logger.info("爬虫程序即将退出...")
                            # Non-zero exit code signals abnormal termination.
                            sys.exit(1)

            # Write the debug HTML into the date-organized debug directory.
            debug_filename = f"{debug_dir}/{city_pinyin}_{area_pinyin}_{current_page}_{timestamp}.html"
            with open(debug_filename, "w", encoding="utf-8") as f:
                f.write(page_content)
            logger.info(f"{log_prefix} 已保存页面内容到{debug_filename}")
        except Exception as save_error:
            logger.error(f"{log_prefix} 保存页面内容时出错: {save_error}")

        # Wait for the listing container; the site markup varies, so several
        # candidate selectors are tried in order.
        logger.info(f"{log_prefix} 等待房源列表加载...")
        house_selector = None
        selectors_to_try = [".property", ".list-item", ".house-item", ".item-mod", "div[data-component='item']", ".house-list .item"]

        for selector in selectors_to_try:
            try:
                logger.info(f"{log_prefix} 尝试选择器: {selector}")
                await page.wait_for_selector(selector, timeout=10000)
                house_selector = selector
                logger.info(f"{log_prefix} 成功找到选择器: {selector}")
                break
            except Exception:
                logger.warning(f"{log_prefix} 选择器 {selector} 未找到元素")

        if not house_selector:
            logger.warning(f"{log_prefix} 所有选择器都未找到房源列表，尝试继续执行...")

        # Scroll the page so lazily-loaded content is rendered.
        logger.info(f"{log_prefix} 滚动页面以加载所有内容...")
        try:
            # Scroll down in steps, then back to the top.
            for i in range(3):
                await page.evaluate(f"window.scrollTo(0, {i * 1000})")
                await asyncio.sleep(1)
            await page.evaluate("window.scrollTo(0, 0)")
            await asyncio.sleep(2)  # allow post-scroll content to settle
        except Exception as scroll_error:
            logger.error(f"{log_prefix} 滚动页面时出错: {scroll_error}")

        # Extract the listing cards.
        logger.info(f"{log_prefix} 提取房源信息...")
        houses = []

        # Query with each candidate selector until one yields elements.
        house_elements = []
        for selector in selectors_to_try:
            try:
                elements = await page.query_selector_all(selector)
                if elements and len(elements) > 0:
                    house_elements = elements
                    logger.info(f"{log_prefix} 使用选择器 {selector} 找到 {len(elements)} 个房源")
                    break
            except Exception as selector_error:
                logger.warning(f"{log_prefix} 使用选择器 {selector} 查询元素时出错: {selector_error}")

        if not house_elements or len(house_elements) == 0:
            logger.warning(f"{log_prefix} 未找到任何房源元素，尝试使用JavaScript获取")
            try:
                # Diagnostic only: count matching elements via JavaScript.
                house_elements_count = await page.evaluate("""
                    (() => {
                        const elements = document.querySelectorAll('.property, .list-item, .house-item, .item-mod');
                        return elements.length;
                    })()
                """)
                logger.info(f"{log_prefix} JavaScript找到 {house_elements_count} 个房源元素")
            except Exception as js_error:
                logger.error(f"{log_prefix} 使用JavaScript获取元素时出错: {js_error}")

        logger.info(f"{log_prefix} 找到 {len(house_elements)} 个房源")

        # Process every listing card on the current page.
        max_houses = len(house_elements)
        for i, house_element in enumerate(house_elements[:max_houses]):
            try:
                logger.info(f"{log_prefix} 正在提取第 {i+1}/{max_houses} 个房源信息...")

                # Candidate selectors for the listing title.
                title_selectors = [
                    ".property-content-title-name", ".house-title", "h3", ".item-title", ".title", ".name"
                ]

                # The house id lives in the card's link: either the segment
                # after "/view/" (up to "?"), or the "<id>.html" file name.
                house_id = ""
                link_element = await house_element.query_selector("a[href]")
                if link_element:
                    href = await link_element.get_attribute("href")
                    if href:
                        match = re.search(r"/view/(.*?)(?:\?|$)|/([^/]+)\.html", href)
                        if match:
                            house_id = match.group(1) or match.group(2)
                            logger.info(f"{log_prefix} 从链接中提取到ID: {house_id}")
                        else:
                            logger.warning(f"{log_prefix} 未从链接中匹配到ID: {href}")
                    else:
                        logger.warning(f"{log_prefix} 链接元素没有href属性")
                else:
                    logger.warning(f"{log_prefix} 未找到房源超链接元素")

                # Title — first matching selector wins.
                title = ""
                try:
                    for selector in title_selectors:
                        title_element = await house_element.query_selector(selector)
                        if title_element:
                            title = await title_element.text_content()
                            title = title.strip()
                            logger.info(f"{log_prefix} 使用选择器 {selector} 提取到标题: {title}")
                            break
                except Exception as title_error:
                    logger.warning(f"{log_prefix} 提取标题时出错: {title_error}")

                # Total price — first matching selector wins.
                price_selectors = [
                    ".property-price-total-num", ".price-det", ".price", ".item-price", ".total-price"
                ]
                total_price = ""
                try:
                    for selector in price_selectors:
                        price_element = await house_element.query_selector(selector)
                        if price_element:
                            total_price = await price_element.text_content()
                            total_price = total_price.strip()
                            logger.info(f"{log_prefix} 使用选择器 {selector} 提取到总价: {total_price}")
                            break
                except Exception as price_error:
                    logger.warning(f"{log_prefix} 提取总价时出错: {price_error}")

                # Unit price (price per square meter).
                unit_price_selectors = [
                    ".property-price-average", ".unit-price", ".price-txt", ".price-average"
                ]
                unit_price = ""
                try:
                    for selector in unit_price_selectors:
                        unit_price_element = await house_element.query_selector(selector)
                        if unit_price_element:
                            unit_price = await unit_price_element.text_content()
                            unit_price = unit_price.strip()
                            logger.info(f"{log_prefix} 使用选择器 {selector} 提取到单价: {unit_price}")
                            break
                except Exception as unit_price_error:
                    logger.warning(f"{log_prefix} 提取单价时出错: {unit_price_error}")

                # Floor plan (rooms layout).
                house_type_selectors = [
                    ".property-content-info-text:nth-child(1)", ".house-details span:nth-child(1)",
                    ".details-item:nth-child(1)", ".huxing"
                ]
                house_type = ""
                try:
                    for selector in house_type_selectors:
                        house_type_element = await house_element.query_selector(selector)
                        if house_type_element:
                            house_type = await house_type_element.text_content()
                            house_type = house_type.strip()
                            logger.info(f"{log_prefix} 使用选择器 {selector} 提取到户型: {house_type}")
                            break
                except Exception as house_type_error:
                    logger.warning(f"{log_prefix} 提取户型时出错: {house_type_error}")

                # Floor area.
                area_selectors = [
                    ".property-content-info-text:nth-child(2)", ".house-details span:nth-child(2)",
                    ".details-item:nth-child(2)", ".area"
                ]
                area_size = ""
                try:
                    for selector in area_selectors:
                        area_element = await house_element.query_selector(selector)
                        if area_element:
                            area_size = await area_element.text_content()
                            area_size = area_size.strip()
                            logger.info(f"{log_prefix} 使用选择器 {selector} 提取到面积: {area_size}")
                            break
                except Exception as area_error:
                    logger.warning(f"{log_prefix} 提取面积时出错: {area_error}")

                # Residential community (estate) name.
                community_selectors = [
                    ".property-content-info-comm-name", ".community-name", ".details-item", ".community"
                ]
                community = ""
                try:
                    for selector in community_selectors:
                        community_element = await house_element.query_selector(selector)
                        if community_element:
                            community = await community_element.text_content()
                            community = community.strip()
                            logger.info(f"{log_prefix} 使用选择器 {selector} 提取到小区: {community}")
                            break
                except Exception as community_error:
                    logger.warning(f"{log_prefix} 提取小区名称时出错: {community_error}")

                # Street address.
                address_selectors = [
                    ".property-content-info-comm-address", ".address", ".details-item:nth-child(2)", ".loc"
                ]
                address = ""
                try:
                    for selector in address_selectors:
                        address_element = await house_element.query_selector(selector)
                        if address_element:
                            address = await address_element.text_content()
                            address = address.strip()
                            logger.info(f"{log_prefix} 使用选择器 {selector} 提取到地址: {address}")
                            break
                except Exception as address_error:
                    logger.warning(f"{log_prefix} 提取地址时出错: {address_error}")

                # Tags — collect ALL matches of the first selector that hits.
                tags_selectors = [
                    ".property-content-info-tag", ".tag-item", ".tags span", ".tag"
                ]
                tags = []
                try:
                    for selector in tags_selectors:
                        tags_elements = await house_element.query_selector_all(selector)
                        if tags_elements and len(tags_elements) > 0:
                            for tag_element in tags_elements:
                                tag_text = await tag_element.text_content()
                                tags.append(tag_text.strip())
                            logger.info(f"{log_prefix} 使用选择器 {selector} 提取到标签: {', '.join(tags)}")
                            break
                except Exception as tags_error:
                    logger.warning(f"{log_prefix} 提取标签时出错: {tags_error}")
                tags_text = ",".join(tags) if tags else ""

                # Orientation — a candidate text only counts if it contains a
                # direction-related keyword.
                orientation_selectors = [
                    ".property-content-info-text:nth-child(3)", ".house-details span:nth-child(3)",
                    ".details-item:nth-child(3)", ".orientation", ".toward", "[data-component='HouseInfo'] div:contains('朝')"
                ]
                orientation = ""
                try:
                    for selector in orientation_selectors:
                        orientation_element = await house_element.query_selector(selector)
                        if orientation_element:
                            orientation_text = await orientation_element.text_content()
                            orientation_text = orientation_text.strip()
                            # Keep the text only if it looks like an orientation.
                            if any(keyword in orientation_text for keyword in ["朝", "东", "南", "西", "北", "向"]):
                                orientation = orientation_text
                                logger.info(f"{log_prefix} 使用选择器 {selector} 提取到朝向: {orientation}")
                                break
                except Exception as orientation_error:
                    logger.warning(f"{log_prefix} 提取朝向时出错: {orientation_error}")

                # Construction year: positional selectors first, then a regex
                # sweep over all info-text nodes as a fallback.
                build_year = ""
                try:
                    for selector in [
                        ".property-content-info-text:nth-child(5)", ".house-details span:nth-child(5)",
                        ".details-item:nth-child(5)", ".build-year", ".year"
                    ]:
                        build_year_element = await house_element.query_selector(selector)
                        if build_year_element:
                            build_year_text = await build_year_element.text_content()
                            build_year_text = build_year_text.strip()
                            if "年建造" in build_year_text or re.search(r"\d{4}年", build_year_text):
                                build_year = build_year_text
                                logger.info(f"{log_prefix} 使用选择器 {selector} 提取到建造年份: {build_year}")
                                break
                    # Fallback: scan every .property-content-info-text node.
                    if not build_year:
                        all_texts = await house_element.query_selector_all(".property-content-info-text")
                        for el in all_texts:
                            text = (await el.text_content()).strip()
                            if "年建造" in text or re.search(r"\d{4}年", text):
                                build_year = text
                                logger.info(f"{log_prefix} 兜底正则提取到建造年份: {build_year}")
                                break
                except Exception as build_year_error:
                    logger.warning(f"{log_prefix} 提取建造年份时出错: {build_year_error}")

                # Floor description: same strategy as the construction year.
                floor = ""
                try:
                    for selector in [
                        ".property-content-info-text:nth-child(4)", ".house-details span:nth-child(4)",
                        ".details-item:nth-child(4)", ".floor", ".level"
                    ]:
                        floor_element = await house_element.query_selector(selector)
                        if floor_element:
                            floor_text = await floor_element.text_content()
                            floor_text = floor_text.strip()
                            if any(k in floor_text for k in ["层", "高层", "中层", "低层", "共"]):
                                floor = floor_text
                                logger.info(f"{log_prefix} 使用选择器 {selector} 提取到楼层: {floor}")
                                break
                    # Fallback: scan every .property-content-info-text node.
                    if not floor:
                        all_texts = await house_element.query_selector_all(".property-content-info-text")
                        for el in all_texts:
                            text = (await el.text_content()).strip()
                            if re.search(r"(高层|中层|低层|共\d+层|\d+层)", text):
                                floor = text
                                logger.info(f"{log_prefix} 兜底正则提取到楼层: {floor}")
                                break
                except Exception as floor_error:
                    logger.warning(f"{log_prefix} 提取楼层时出错: {floor_error}")

                # District / sub-area: parsed from the address element's spans
                # (first span = district, second span = more specific area).
                area_detail = ""
                district = ""
                try:
                    address_selectors = [
                        ".property-content-info-comm-address", ".address", ".details-item:nth-child(2)", ".loc"
                    ]

                    for selector in address_selectors:
                        address_element = await house_element.query_selector(selector)
                        if address_element:
                            # The span children usually hold district/street info.
                            span_elements = await address_element.query_selector_all("span")
                            if span_elements and len(span_elements) >= 2:
                                # First span: district (区/县).
                                district_text = await span_elements[0].text_content()
                                district = district_text.strip()

                                # Second span: more specific sub-area.
                                if len(span_elements) >= 2:
                                    area_text = await span_elements[1].text_content()
                                    area_detail = area_text.strip()

                                logger.info(f"{log_prefix} 提取到区域: {district}, 具体区域: {area_detail}")
                                break
                except Exception as area_detail_district_error:
                    logger.warning(f"{log_prefix} 提取区域信息时出错: {area_detail_district_error}")

                # Diagnostic dump of every detail text on the card (logged only,
                # not parsed further yet).
                try:
                    all_details = await house_element.evaluate("""
                        (element) => {
                            const allTexts = [];
                            const detailElements = element.querySelectorAll('.property-content-info-text, .details-item, [data-component="HouseInfo"] div');
                            detailElements.forEach(el => {
                                const text = el.textContent.trim();
                                if (text) allTexts.push(text);
                            });
                            return allTexts.join('|');
                        }
                    """)

                    if all_details:
                        logger.info(f"{log_prefix} 获取到所有详细信息: {all_details}")
                        # Could be parsed further for additional fields.
                except Exception as js_error:
                    logger.error(f"{log_prefix} 使用JavaScript获取详细信息时出错: {js_error}")

                # Assemble the record for this listing.
                house_info = {
                    "house_id":  house_id,
                    "标题": title,
                    "总价": total_price,
                    "单价": unit_price,
                    "户型": house_type,
                    "面积": area_size,
                    "朝向": orientation,
                    "建造年份": build_year,
                    "楼层": floor,
                    "城市": city_name,
                    "区域": area_detail, 
                    "区县": district,
                    "小区": community,
                    "地址": address,
                    "标签": tags_text,
                    "爬取时间": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                }
                houses.append(house_info)
                logger.info(f"{log_prefix} 成功提取第 {i+1} 个房源信息: {title}")
            except Exception as e:
                logger.error(f"{log_prefix} 提取第 {i+1} 个房源信息时出错: {e}")
                traceback.print_exc()

        # Persist the scraped records to a CSV file.
        if houses:
            logger.info(f"{log_prefix} 将 {len(houses)} 个房源信息保存到CSV文件...")
            # CSV name mirrors the log file: city_area_page_timestamp.
            csv_filename = f"{csv_dir}/{city_pinyin}_{area_pinyin}_{current_page}_{timestamp}.csv"
            try:
                # utf-8-sig so Excel opens the Chinese headers correctly.
                with open(csv_filename, "w", newline="", encoding="utf-8-sig") as f:
                    writer = csv.DictWriter(f, fieldnames=houses[0].keys())
                    writer.writeheader()
                    writer.writerows(houses)
                logger.info(f"{log_prefix} 数据已保存到{csv_filename}")
                success = True
            except Exception as csv_error:
                logger.error(f"{log_prefix} 保存CSV文件时出错: {csv_error}")
                traceback.print_exc()
        else:
            logger.warning(f"{log_prefix} 未提取到任何房源信息")

        # Also persist to MySQL via utils.mysql_handler (best-effort).
        if houses:
            try:
                from utils.mysql_handler import save_to_mysql
                mysql_result = save_to_mysql(houses, city_pinyin, area_pinyin)
                if mysql_result:
                    logger.info(f"{log_prefix} 房源数据已成功保存到MySQL数据库表 houses_{city_pinyin}_{area_pinyin}")
                else:
                    logger.error(f"{log_prefix} 保存房源数据到MySQL数据库表 houses_{city_pinyin}_{area_pinyin} 失败")
            except Exception as mysql_error:
                logger.error(f"{log_prefix} 保存到MySQL数据库时出错: {mysql_error}")
                traceback.print_exc()


        end_time = datetime.now()
        duration = (end_time - start_time).total_seconds()
        logger.info(f"{log_prefix} 当前页爬取完成，耗时: {duration:.2f}秒")

        # Pagination: look for a "next page" control.
        try:
            logger.info(f"{log_prefix} 检查是否有下一页...")
            next_page_selectors = [
                "a:has-text('下一页')", 
                ".next-page", 
                ".next", 
                "a[class*='next']",
                "a[data-page='next']"
            ]

            next_page_element = None
            for selector in next_page_selectors:
                next_page_element = await page.query_selector(selector)
                if next_page_element:
                    logger.info(f"{log_prefix} 找到下一页按钮，使用选择器: {selector}")
                    break

            # On the last page the button's href is "javascript:void(0);".
            has_next_page = False
            if next_page_element:
                href = await next_page_element.get_attribute('href')
                logger.info(f"{log_prefix} 下一页按钮href属性: {href}")
                has_next_page = href and href != "javascript:void(0);"

            if has_next_page:
                # Persist the current page so an interrupted run can resume.
                save_page_state(current_page)

                next_page = current_page + 1
                logger.info(f"{log_prefix} 准备访问下一页 (页码: {next_page})")

                # Report the next page instead of clicking the button; the
                # caller navigates by building a fresh URL.
                return {
                    "success": success,
                    "has_next_page": True,
                    "next_page": next_page
                }
            else:
                if next_page_element:
                    logger.info(f"{log_prefix} 下一页按钮href为javascript:void(0);，已到最后一页，爬取完成")
                else:
                    logger.info(f"{log_prefix} 没有找到下一页按钮，爬取完成")
                # Crawl finished: reset the persisted page back to 1.
                save_page_state(1)
        except Exception as next_page_error:
            logger.error(f"{log_prefix} 检查下一页时出错: {next_page_error}")
            traceback.print_exc()

        # Normal completion (no next page).
        return {
            "success": success,
            "has_next_page": False
        }
    except Exception as e:
        logger.error(f"{log_prefix} 爬虫运行出错: {e}")
        traceback.print_exc()
        # Persist the current page so the next run can resume from here.
        save_page_state(current_page)
        return {
            "success": False,
            "has_next_page": False
        }
    finally:
        # Always release browser resources, even on failure.
        try:
            if page:
                logger.info(f"{log_prefix} 关闭页面...")
                await page.close()
            if context:
                logger.info(f"{log_prefix} 关闭浏览器上下文...")
                await context.close()
            if browser:
                logger.info(f"{log_prefix} 关闭浏览器...")
                await browser.close()
                logger.info(f"{log_prefix} 浏览器已关闭")
        except Exception as close_error:
            logger.error(f"{log_prefix} 关闭资源时出错: {close_error}")
            traceback.print_exc()

async def main(headless=True, max_retries=3, max_pages=None, city_pinyin="chongqing", area_pinyin="banan"):
    """Drive the crawler page by page until there is no next page.

    Resumes from the page number persisted by ``save_page_state``; each page
    is attempted up to ``max_retries`` times before the run is aborted.

    Args:
        headless: Run the browser without a visible window when True.
        max_retries: Maximum attempts per page before giving up.
        max_pages: Stop after this many successfully crawled pages
            (``None`` means crawl until the site reports no next page).
        city_pinyin: City slug used to build the listing URL.
        area_pinyin: District slug used to build the listing URL.

    Returns:
        True if at least one page was crawled successfully, else False.
    """
    success = False
    has_next_page = True
    pages_crawled = 0

    # Debug artifacts (screenshots, dumps) produced by run() land here.
    debug_dir = "../debug"
    Path(debug_dir).mkdir(parents=True, exist_ok=True)

    # Resume from the persisted page number (1 on first run).
    current_page = load_page_state()
    print(f"从页码 {current_page} 开始爬取")

    # Keep crawling until the site has no next page or max_pages is reached.
    while has_next_page and (max_pages is None or pages_crawled < max_pages):
        retry_count = 0
        while retry_count < max_retries and not success:
            try:
                print(f"尝试爬取第 {current_page} 页 (尝试 {retry_count + 1}/{max_retries})...")
                print(f"浏览器模式: {'无头模式' if headless else '有头模式'}")

                # Remember which page this attempt targets so that success
                # messages and retries always reference the right page.
                attempted_page = current_page

                async with async_playwright() as playwright:
                    result = await run(playwright, headless=headless, debug_dir=debug_dir, current_page=current_page, city_pinyin=city_pinyin, area_pinyin=area_pinyin)
                    success = result.get("success", False)
                    has_next_page = result.get("has_next_page", False)

                    # BUG FIX: only advance the page counter on success.
                    # Previously the counter advanced whenever has_next_page
                    # was True, so a failed attempt would retry (and state-save)
                    # the *next* page and silently skip the failed one.
                    if success and has_next_page:
                        current_page = result.get("next_page", current_page + 1)
                        print(f"下一页页码设置为: {current_page}")

                if success:
                    print(f"第 {attempted_page} 页爬取成功！")
                    pages_crawled += 1
                    print(f"已爬取页数: {pages_crawled}" + (f"/{max_pages}" if max_pages is not None else ""))

                    if max_pages is not None and pages_crawled >= max_pages:
                        print(f"已达到最大爬取页数 {max_pages}，停止爬取")
                        has_next_page = False
                        break
                    elif has_next_page:
                        print(f"将继续爬取第 {current_page} 页...")
                        # Random pause between pages to avoid hammering the site.
                        wait_time = random.uniform(3, 8)
                        print(f"等待 {wait_time:.2f} 秒后继续...")
                        await asyncio.sleep(wait_time)
                        # Reset so the inner loop performs the next page's attempt.
                        success = False
                    else:
                        print("没有更多页面需要爬取")
                else:
                    print(f"爬取第 {current_page} 页失败，正在重试... ({retry_count + 1}/{max_retries})")
                    retry_count += 1
                    # Short back-off before retrying the same page.
                    await asyncio.sleep(3)
            except Exception as e:
                print(f"爬取第 {current_page} 页时出现异常: {e}")
                traceback.print_exc()
                retry_count += 1
                if retry_count < max_retries:
                    print(f"将在3秒后重试... ({retry_count}/{max_retries})")
                    await asyncio.sleep(3)
                else:
                    print("达到最大重试次数，当前页爬取失败。")
                    # Persist the failed page so the next run resumes here.
                    save_page_state(current_page)
                    has_next_page = False

        # A page that exhausted its retry budget aborts the whole run.
        if not success and retry_count >= max_retries:
            print(f"爬取第 {current_page} 页失败，停止爬取。")
            break

    total_result = pages_crawled > 0
    if total_result:
        print(f"爬虫成功爬取了 {pages_crawled} 页内容")
    else:
        print("所有重试都失败了，请检查网络连接或网站结构是否发生变化。")

    return total_result

if __name__ == "__main__":
    # Command-line entry point: parse options, optionally reset the persisted
    # page state, then run the async crawler to completion.
    cli = argparse.ArgumentParser(description='安居客二手房爬虫')
    cli.add_argument('--headless', action='store_true', help='使用无头模式运行浏览器')
    cli.add_argument('--no-headless', dest='headless', action='store_false', help='使用有头模式运行浏览器')
    cli.add_argument('--retries', type=int, default=3, help='最大重试次数')
    cli.add_argument('--max-pages', type=int, default=None, help='最大爬取页数，默认为无限制')
    cli.add_argument('--reset-page', action='store_true', help='重置页码状态，从第1页开始爬取')
    cli.add_argument('--city_pinyin', type=str, default='chongqing', help='城市拼音，例如：chongqing')
    cli.add_argument('--area_pinyin', type=str, default='banan', help='区域拼音，例如：banan, yubei')
    cli.set_defaults(headless=True)

    opts = cli.parse_args()

    # --reset-page forces the next run to start over from page 1.
    if opts.reset_page:
        save_page_state(1)
        print("已重置页码状态，将从第1页开始爬取")

    try:
        print(f"启动安居客爬虫程序 - {'无头模式' if opts.headless else '有头模式'} - 最大重试次数: {opts.retries}")
        print(f"爬取城市: {opts.city_pinyin}, 区域: {opts.area_pinyin}")
        if opts.max_pages:
            print(f"最大爬取页数: {opts.max_pages}")
        else:
            print("爬取页数: 无限制（直到没有下一页）")

        ok = asyncio.run(main(headless=opts.headless, max_retries=opts.retries, max_pages=opts.max_pages, city_pinyin=opts.city_pinyin, area_pinyin=opts.area_pinyin))
        print("\n爬虫程序成功完成！" if ok else "\n爬虫程序未能成功完成。")
    except KeyboardInterrupt:
        print("\n程序被用户中断。")
        # Report the persisted page so the user knows where the next run resumes.
        resume_page = load_page_state()
        print(f"程序中断时的页码: {resume_page}，下次将从此页继续爬取")
    except Exception as e:
        print(f"\n程序运行时出现未处理的异常: {e}")
        traceback.print_exc()
    finally:
        print("\n爬虫程序已退出。")