from selenium import webdriver
from time import sleep, time
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import json
import os
from datetime import datetime

# Module-level anti-ban state: tracks redirect-file output and enforces
# periodic rest breaks to reduce the risk of account/IP bans.
REDIRECT_FILE_COUNT = 0  # redirect files created since the last rest
REST_THRESHOLD = 2  # rest after this many redirect files (NOTE(review): original comment claimed 10 — value is 2; confirm intent)
REST_DURATION = 10  # rest length in seconds (NOTE(review): original comment claimed 2 minutes — value is 10s; confirm intent)
LAST_REST_TIME = 0  # time() timestamp of the most recent rest

# Anti-ban state for page scrolling
SCROLL_COUNT = 0  # scroll operations performed since the last rest
SCROLL_REST_THRESHOLD = 2  # rest after this many scrolls (NOTE(review): original comment claimed 20 — value is 2; confirm intent)


def check_and_take_scroll_rest():
    """Pause when the scroll counter reaches its threshold.

    When SCROLL_COUNT >= SCROLL_REST_THRESHOLD, sleeps for REST_DURATION
    seconds (with a console countdown) to lower the risk of account/IP
    bans caused by frequent scrolling, then resets SCROLL_COUNT and
    records the rest timestamp in LAST_REST_TIME.
    """
    global SCROLL_COUNT, LAST_REST_TIME

    # Guard clause: nothing to do until the threshold is reached.
    if SCROLL_COUNT < SCROLL_REST_THRESHOLD:
        return

    print(f"\n🛡️ === 滚动防封号休息机制触发 === 🛡️")
    print(f"📊 已滚动 {SCROLL_COUNT} 次，达到滚动休息阈值 ({SCROLL_REST_THRESHOLD})")
    print(f"⏰ 为了避免因频繁滚动导致封号和封IP，现在休息 {REST_DURATION} 秒 ({REST_DURATION // 60} 分钟)...")
    print(f"🔄 休息期间请勿关闭程序，系统将自动恢复滚动")
    print("-" * 50)

    # Countdown display; granularity depends on how long the rest is.
    if REST_DURATION >= 30:
        # Long rests: report progress every 30 seconds.
        for remaining in range(REST_DURATION, 0, -30):
            mins = remaining // 60
            secs = remaining % 60
            print(f"  ⏳ 滚动休息中... 剩余 {remaining} 秒 ({mins}分{secs}秒)")
            sleep(30)
    elif REST_DURATION <= 5:
        # Very short rests: a single sleep, no countdown.
        print(f"  ⏳ 滚动休息 {REST_DURATION} 秒...")
        sleep(REST_DURATION)
    else:
        # 6-29 seconds: report progress every second.
        for remaining in range(REST_DURATION, 0, -1):
            print(f"  ⏳ 滚动休息中... 剩余 {remaining} 秒")
            sleep(1)

    print("  ✅ 滚动休息完毕，继续滚动处理...")
    print("🛡️ === 滚动休息结束 === 🛡️\n")

    # Reset the counter and remember when this rest happened.
    # (Removed an unused `current_time = time()` assignment.)
    SCROLL_COUNT = 0
    LAST_REST_TIME = time()


def increment_scroll_count():
    """Bump the scroll counter, then trigger a rest break if due."""
    global SCROLL_COUNT
    SCROLL_COUNT = SCROLL_COUNT + 1
    print(f"  -> 【滚动防封号机制】滚动次数: {SCROLL_COUNT}/{SCROLL_REST_THRESHOLD}")
    check_and_take_scroll_rest()


def check_and_take_rest():
    """Pause when the redirect-file counter reaches its threshold.

    When REDIRECT_FILE_COUNT >= REST_THRESHOLD, sleeps for REST_DURATION
    seconds (with a console countdown) to lower the risk of account/IP
    bans, then resets REDIRECT_FILE_COUNT and records the rest timestamp
    in LAST_REST_TIME.
    """
    global REDIRECT_FILE_COUNT, LAST_REST_TIME

    # Guard clause: nothing to do until the threshold is reached.
    if REDIRECT_FILE_COUNT < REST_THRESHOLD:
        return

    print(f"\n🛡️ === 防封号休息机制触发 === 🛡️")
    print(f"📊 已处理 {REDIRECT_FILE_COUNT} 个重定向文件，达到休息阈值 ({REST_THRESHOLD})")
    print(f"⏰ 为了避免封号和封IP，现在休息 {REST_DURATION} 秒 ({REST_DURATION // 60} 分钟)...")
    print(f"🔄 休息期间请勿关闭程序，系统将自动恢复处理")
    print("-" * 50)

    # Countdown display; granularity depends on how long the rest is.
    if REST_DURATION >= 30:
        # Long rests: report progress every 30 seconds.
        for remaining in range(REST_DURATION, 0, -30):
            mins = remaining // 60
            secs = remaining % 60
            print(f"  ⏳ 休息中... 剩余 {remaining} 秒 ({mins}分{secs}秒)")
            sleep(30)
    elif REST_DURATION <= 5:
        # Very short rests: a single sleep, no countdown.
        print(f"  ⏳ 休息 {REST_DURATION} 秒...")
        sleep(REST_DURATION)
    else:
        # 6-29 seconds: report progress every second.
        for remaining in range(REST_DURATION, 0, -1):
            print(f"  ⏳ 休息中... 剩余 {remaining} 秒")
            sleep(1)

    print("  ✅ 休息完毕，继续处理...")
    print("🛡️ === 休息结束 === 🛡️\n")

    # Reset the counter and remember when this rest happened.
    # (Removed an unused `current_time = time()` assignment.)
    REDIRECT_FILE_COUNT = 0
    LAST_REST_TIME = time()


def increment_redirect_count():
    """Bump the redirect-file counter, then trigger a rest break if due."""
    global REDIRECT_FILE_COUNT
    REDIRECT_FILE_COUNT = REDIRECT_FILE_COUNT + 1
    print(f"  -> 【防封号机制】重定向文件计数: {REDIRECT_FILE_COUNT}/{REST_THRESHOLD}")
    check_and_take_rest()


def process_xiaohongshu_urls(url_list):
    """
    Process a list of Xiaohongshu URLs: detect the login state, run the
    login flow if needed, then visit each URL and extract image links.

    Args:
        url_list (list): Xiaohongshu URLs to process.

    Returns:
        tuple: (results dict, WebDriver instance). The driver is returned
        so the caller can reuse or close the browser session. (Fix: the
        login-failure paths previously returned a bare dict, which broke
        callers unpacking the tuple.)
    """

    # Download path (mirrors imageCivitai.py).
    download_path = r"H:\download\tmp"

    # Configure the download directory and silent auto-download behavior.
    chrome_options = Options()
    chrome_options.add_experimental_option("prefs", {
        "download.default_directory": download_path,
        "download.prompt_for_download": False,
        "download.directory_upgrade": True,
        "safebrowsing.enabled": True,
        "profile.default_content_settings.popups": 0
    })

    # Key setting: a persistent user-data dir keeps cookies / login state
    # across runs.
    chrome_options.add_argument(r"--user-data-dir=H:\download\userdata")

    # Disable features that can interfere with persisting the login state.
    chrome_options.add_argument("--disable-web-security")
    chrome_options.add_argument("--disable-features=VizDisplayCompositor")
    chrome_options.add_argument("--start-minimized")  # start the window minimized

    # Make sure the user-data directory exists.
    userdata_dir = r"H:\download\userdata"
    if not os.path.exists(userdata_dir):
        os.makedirs(userdata_dir)
        print(f"创建用户数据目录: {userdata_dir}")

    # Launch Chrome. Fix: the driver path must be a raw string — '\c' in a
    # plain literal is an invalid escape sequence (DeprecationWarning,
    # future SyntaxError).
    service = Service(r'D:\chromedriver-win64\chromedriver.exe')
    driver = webdriver.Chrome(service=service, options=chrome_options)

    # Fixed window size (mirrors imageCivitai.py).
    driver.set_window_size(1600, 1000)

    print(f"Chrome用户数据目录: {userdata_dir}")
    print("如果登录成功，用户数据将保存到此目录，下次运行时会自动恢复登录状态")

    results = {
        'total_urls': len(url_list),
        'processed_urls': [],
        'login_required': False,
        'login_completed': False,
        'errors': [],
        'timestamp': datetime.now().isoformat()
    }

    try:
        # Use the first URL to probe the login state.
        if url_list:
            first_url = url_list[0]
            print(f"正在访问第一个URL检测登录状态: {first_url}")

            driver.get(first_url)
            sleep(3)

            login_status = check_xiaohongshu_login_status(driver)
            results['login_required'] = login_status['need_login']

            if login_status['need_login']:
                print("检测到需要登录，正在尝试点击登录按钮...")

                login_success = handle_xiaohongshu_login(driver)

                if login_success:
                    print("登录按钮已点击，请在浏览器中完成扫码登录...")

                    # Poll for login completion instead of waiting for a
                    # manual keypress.
                    login_completed = wait_for_login_completion(driver)
                    results['login_completed'] = login_completed

                    if results['login_completed']:
                        print("检测到登录成功！开始处理URL列表...")
                    else:
                        print("登录超时或失败，请检查登录状态")
                        results['errors'].append("登录超时或失败")
                        # Fix: return the same (results, driver) tuple as
                        # every other exit path.
                        return results, driver
                else:
                    print("无法找到或点击登录按钮")
                    results['errors'].append("无法找到登录按钮")
                    return results, driver
            else:
                print("检测到已登录状态，直接开始处理URL列表...")
                results['login_completed'] = True

            # Logged in (or no login required): process every URL.
            if results['login_completed'] or not results['login_required']:
                for i, url in enumerate(url_list):
                    try:
                        print(f"正在处理第 {i + 1}/{len(url_list)} 个URL: {url}")

                        driver.get(url)
                        sleep(2)

                        # Realtime extraction strategy for this URL.
                        url_result = process_single_xiaohongshu_url_realtime(driver, url)

                        results['processed_urls'].append({
                            'url': url,
                            'index': i + 1,
                            'status': 'success',
                            'result': url_result,
                            'timestamp': datetime.now().isoformat()
                        })

                        print(f"URL {i + 1} 处理完成，提取到 {len(url_result.get('image_links', []))} 个唯一URL")

                        # Persist this URL's results to standalone files
                        # immediately.
                        if url_result.get('status') == 'success' and url_result.get('image_links'):
                            print(f"\n=== 正在保存第 {i + 1}/{len(url_list)} 个URL的独立文件 ===")

                            # Save the redirected-URL file and the
                            # concatenated-string file.
                            redirected_path, concat_path, url_count = save_single_url_result(
                                url_result, i + 1, len(url_list), driver=driver
                            )

                            print(f"     ✓ 第 {i + 1} 个URL的文件保存完成")
                            print(f"     ✓ 重定向URL数量: {url_count}")
                        else:
                            print(f"     - 第 {i + 1} 个URL未提取到有效内容，跳过文件保存")

                    except Exception as e:
                        error_msg = f"处理URL {i + 1} 时出错: {str(e)}"
                        print(error_msg)
                        results['errors'].append(error_msg)
                        results['processed_urls'].append({
                            'url': url,
                            'index': i + 1,
                            'status': 'error',
                            'error': str(e),
                            'timestamp': datetime.now().isoformat()
                        })

        return results, driver

    except Exception as e:
        error_msg = f"处理过程中出现严重错误: {str(e)}"
        print(error_msg)
        results['errors'].append(error_msg)
        return results, driver


def check_xiaohongshu_login_status(driver):
    """
    Detect whether Xiaohongshu requires login, based solely on whether the
    specific login button is visible:
    <button class="reds-button-new login-btn large primary login-btn" id="login-btn">

    Args:
        driver: WebDriver instance.

    Returns:
        dict: need_login flag, button details (if found) and page metadata.
    """

    print("正在检测小红书登录状态...")

    # Candidate locators for the login button, from most to least specific.
    candidate_selectors = (
        "button#login-btn",
        "//button[@id='login-btn']",
        "button.reds-button-new.login-btn",
        "//button[contains(@class, 'reds-button-new') and contains(@class, 'login-btn')]",
    )

    need_login = False
    button = None
    button_info = {}

    for sel in candidate_selectors:
        by = By.XPATH if sel.startswith("//") else By.CSS_SELECTOR
        try:
            visible = next(
                (el for el in driver.find_elements(by, sel) if el.is_displayed()),
                None,
            )
            if visible is None:
                continue
            need_login = True
            button = visible
            button_info = {
                'selector': sel,
                'id': visible.get_attribute('id'),
                'class': visible.get_attribute('class'),
                'text': visible.text.strip(),
                'is_enabled': visible.is_enabled()
            }
            print(f"  -> 找到登录按钮: {sel}")
            print(f"     ID: {button_info['id']}")
            print(f"     Class: {button_info['class']}")
            break
        except Exception:
            # Selector failed (stale element, bad locator, ...) — try next.
            continue

    result = {
        'need_login': need_login,
        'found_login_button': button is not None,
        'login_button_info': button_info,
        'page_title': driver.title,
        'current_url': driver.current_url,
        'timestamp': datetime.now().isoformat()
    }

    status_text = "需要登录" if need_login else "已登录"
    print(f"  -> 登录状态检测结果: {status_text}")

    return result


def handle_xiaohongshu_login(driver):
    """
    Locate the Xiaohongshu login button and try to click it.

    Target element:
    <button class="reds-button-new login-btn large primary login-btn" id="login-btn">

    Args:
        driver: WebDriver instance.

    Returns:
        bool: True once the button was found and successfully clicked.
    """

    # Candidate locators, from most to least specific.
    candidate_selectors = [
        "button#login-btn",
        "//button[@id='login-btn']",
        "button.reds-button-new.login-btn",
        "//button[contains(@class, 'reds-button-new') and contains(@class, 'login-btn')]",
    ]

    print("正在尝试找到并点击登录按钮...")

    for index, selector in enumerate(candidate_selectors, start=1):
        print(f"  -> 尝试选择器 {index}/{len(candidate_selectors)}: {selector}")
        by = By.XPATH if selector.startswith("//") else By.CSS_SELECTOR
        try:
            candidates = WebDriverWait(driver, 3).until(
                EC.presence_of_all_elements_located((by, selector))
            )

            for candidate in candidates:
                # Only attempt visible, enabled buttons.
                if not (candidate.is_displayed() and candidate.is_enabled()):
                    continue

                print("  -> 找到登录按钮!")
                print(f"     ID: {candidate.get_attribute('id')}")
                print(f"     Class: {candidate.get_attribute('class')}")
                print(f"     Text: {candidate.text}")

                # Bring the button into view before clicking.
                driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", candidate)
                sleep(1)

                if click_login_button(driver, candidate, selector):
                    return True

        except Exception as e:
            print(f"  -> 选择器 {selector} 未找到元素: {e}")

    print("  -> 未能找到登录按钮")
    return False


def wait_for_login_completion(driver, max_wait_time=300):
    """
    Wait for login to finish by polling until both the QR-code panel and
    the login button disappear from the page.

    QR-code element: <div data-v-12f84090 data-v-67bebeda class="qrcode force-light">

    Args:
        driver: WebDriver instance.
        max_wait_time: Maximum wait in seconds (default 5 minutes).

    Returns:
        bool: True if login was detected within max_wait_time.
    """

    print("正在等待登录完成...")
    print("请在浏览器中扫码登录，程序将自动检测登录状态...")

    # QR-code panel locators.
    qrcode_selectors = [
        "div.qrcode.force-light",
        ".qrcode",
        "//div[contains(@class, 'qrcode')]",
        "//div[contains(@class, 'qrcode') and contains(@class, 'force-light')]",
        "[data-v-12f84090][data-v-67bebeda].qrcode",
    ]

    # Login-button locators (secondary signal).
    login_button_selectors = [
        "button#login-btn",
        "//button[@id='login-btn']",
        "button.reds-button-new.login-btn",
        "//button[contains(@class, 'reds-button-new') and contains(@class, 'login-btn')]"
    ]

    def _any_visible(selectors):
        """Return True if any element matched by *selectors* is displayed.

        (Refactor: this scan was duplicated verbatim for the QR code and
        the login button.)
        """
        for selector in selectors:
            try:
                if selector.startswith("//"):
                    elements = driver.find_elements(By.XPATH, selector)
                else:
                    elements = driver.find_elements(By.CSS_SELECTOR, selector)
                if any(el.is_displayed() for el in elements):
                    return True
            except Exception:
                continue
        return False

    start_time = time()
    check_interval = 3  # poll every 3 seconds

    while time() - start_time < max_wait_time:
        try:
            qrcode_exists = _any_visible(qrcode_selectors)
            login_button_exists = _any_visible(login_button_selectors)

            # Both gone => the user has logged in.
            if not qrcode_exists and not login_button_exists:
                print("✓ 检测到二维码和登录按钮都已消失，登录成功！")
                print("✓ 登录状态将自动保存到用户数据目录中")
                sleep(2)  # let the page settle so cookies get persisted
                return True

            # Progress report.
            elapsed_time = int(time() - start_time)
            remaining_time = max_wait_time - elapsed_time

            if qrcode_exists:
                print(f"  -> 检测到二维码仍存在，继续等待... (已等待 {elapsed_time}s，剩余 {remaining_time}s)")
            elif login_button_exists:
                print(f"  -> 检测到登录按钮仍存在，继续等待... (已等待 {elapsed_time}s，剩余 {remaining_time}s)")

            sleep(check_interval)

        except Exception as e:
            print(f"  -> 检测登录状态时出错: {e}")
            sleep(check_interval)
            continue

    print(f"✗ 登录等待超时 ({max_wait_time}s)，请检查登录状态")
    return False


def wait_for_login_completion_alternative(driver, max_wait_time=300):
    """
    Fallback login detector: watch for the browser URL to change, then
    confirm with check_xiaohongshu_login_status.

    Args:
        driver: WebDriver instance.
        max_wait_time: Maximum wait in seconds.

    Returns:
        bool: True if login was confirmed within max_wait_time.
    """

    print("使用备用方法检测登录状态...")
    start_time = time()
    initial_url = driver.current_url

    while time() - start_time < max_wait_time:
        try:
            now_url = driver.current_url

            # A URL change may indicate a post-login redirect.
            if now_url != initial_url:
                print(f"✓ 检测到URL变化，可能登录成功: {now_url}")

                # Double-check that the login button is really gone.
                status = check_xiaohongshu_login_status(driver)
                if status['need_login']:
                    print("  -> URL变化但仍需要登录，继续等待...")
                else:
                    print("✓ 确认登录成功！")
                    return True

            waited = int(time() - start_time)
            print(f"  -> 等待登录完成... (已等待 {waited}s，剩余 {max_wait_time - waited}s)")

            sleep(3)

        except Exception as e:
            print(f"  -> 检测过程中出错: {e}")
            sleep(3)

    print("✗ 备用方法检测登录超时")
    return False


def click_login_button(driver, element, selector):
    """
    Click the login button, trying progressively more forceful strategies:
    native click, JavaScript click, then ActionChains.

    Args:
        driver: WebDriver instance.
        element: The login-button element.
        selector: The selector that located it (for logging only).

    Returns:
        bool: True as soon as one strategy succeeds.
    """

    # (success label, failure label, action) tried in order.
    strategies = (
        ("登录按钮点击", "直接点击",
         lambda: element.click()),
        ("JavaScript点击", "JavaScript点击",
         lambda: driver.execute_script("arguments[0].click();", element)),
        ("ActionChains点击", "ActionChains点击",
         lambda: ActionChains(driver).move_to_element(element).click().perform()),
    )

    for success_label, failure_label, attempt in strategies:
        try:
            attempt()
            print(f"     {success_label}成功！(选择器: {selector})")
            sleep(2)
            return True
        except Exception as err:
            print(f"     {failure_label}失败: {err}")

    return False


def process_single_xiaohongshu_url_realtime(driver, url):
    """
    Process a single Xiaohongshu URL with a realtime extraction strategy:
    URLs are extracted and de-duplicated immediately after every scroll
    round, instead of waiting for all content to accumulate first.

    Args:
        driver: WebDriver instance (page for *url* already loaded by caller)
        url: the URL being processed (recorded in the result)

    Returns:
        dict: on success — 'image_links' (unique collected URLs),
        'total_scrolls', 'user_title', 'page_title' and metadata;
        on failure — 'status' == 'error', 'error' text and an empty
        'image_links' list.
    """

    print(f"  -> 开始实时提取URL: {url}")

    try:
        # Give the page time to finish loading.
        sleep(3)

        # URL collector; a set de-duplicates automatically.
        collected_urls = set()
        base_url = "https://www.xiaohongshu.com"

        print("  -> 正在使用实时提取策略滚动和收集URL...")

        # Harvest URLs that are visible before any scrolling.
        initial_urls = extract_urls_from_current_viewport(driver, base_url)
        collected_urls.update(initial_urls)
        print(f"  -> 初始提取到 {len(initial_urls)} 个URL")

        # Scroll and collect URLs in realtime.
        max_scrolls = 30  # upper bound on scroll rounds
        no_new_urls_count = 0  # consecutive rounds that yielded no new URLs
        max_no_new = 5  # stop after this many consecutive empty rounds

        for scroll_round in range(max_scrolls):
            print(f"  -> 第 {scroll_round + 1} 轮滚动...")

            # URL count before this scroll round (currently informational).
            before_count = len(collected_urls)

            # Perform one scroll step.
            scroll_success = perform_scroll_operation(driver, scroll_round)

            if scroll_success:
                # Let freshly loaded content render.
                sleep(3)

                # Immediately harvest any new URLs.
                new_urls = extract_urls_from_current_viewport(driver, base_url)

                # Count how many of them were actually new (set growth).
                before_update = len(collected_urls)
                collected_urls.update(new_urls)
                after_update = len(collected_urls)
                new_count = after_update - before_update

                print(f"  -> 本轮新增 {new_count} 个URL (总计: {len(collected_urls)} 个)")

                if new_count > 0:
                    no_new_urls_count = 0  # reset the empty-round counter
                    # Show a few sample URLs from small batches.
                    if new_count <= 5:
                        try:
                            latest_urls = list(new_urls)[:3]  # simplified sampling
                            for i, sample_url in enumerate(latest_urls):
                                print(f"    样例{i + 1}: {sample_url[:70]}...")
                        except Exception as e:
                            print(f"    样例显示出错: {e}")
                else:
                    no_new_urls_count += 1
                    print(f"  -> 无新URL ({no_new_urls_count}/{max_no_new})")

                # Early-stop check: too many empty rounds in a row.
                if no_new_urls_count >= max_no_new:
                    print(f"  -> 连续{max_no_new}轮无新URL，提前停止滚动")
                    break
            else:
                # Scroll did not grow the page; note stricter limit (3) here.
                print(f"  -> 滚动失败，可能已到页面底部")
                no_new_urls_count += 1
                if no_new_urls_count >= 3:
                    break

        # Materialize the final de-duplicated URL list.
        final_url_list = list(collected_urls)

        print(f"  -> 滚动完成，共执行{scroll_round + 1}轮，收集到{len(final_url_list)}个唯一URL")

        # Grab basic page metadata; tolerate driver failures.
        try:
            page_title = driver.title
        except Exception as e:
            page_title = "获取标题失败"
            print(f"  -> 获取页面标题时出错: {e}")

        # User-name heading (used downstream for file naming).
        user_title = extract_user_title_from_page(driver)
        print(f"  -> 用户标题: {user_title}")

        result = {
            'url': url,
            'page_title': page_title,
            'user_title': user_title,
            'extraction_method': 'realtime_collection',
            'total_scrolls': scroll_round + 1,
            'total_image_links': len(final_url_list),
            'image_links': final_url_list,
            'status': 'success',
            'timestamp': datetime.now().isoformat()
        }

        print(f"  -> 实时提取完成！总计收集到 {len(final_url_list)} 个唯一URL")
        return result

    except Exception as e:
        error_msg = f"实时URL提取出错: {str(e)}"
        print(f"  -> {error_msg}")
        return {
            'url': url,
            'status': 'error',
            'error': str(e),
            'image_links': [],
            'timestamp': datetime.now().isoformat()
        }


def extract_user_title_from_page(driver):
    """
    Extract the user-name heading from the page.

    Target element: <div class="user-name" data-v-1d90bc98>

    Args:
        driver: WebDriver instance.

    Returns:
        str: Sanitized user name, or "unknown_user" on failure.
    """
    # Candidate locators, from exact match down to broad fallbacks.
    candidate_selectors = (
        "div.user-name[data-v-1d90bc98]",
        "div.user-name",
        "div[class*='user-name']",
        ".user-name",
        "//div[contains(@class, 'user-name')]",
    )

    try:
        for selector in candidate_selectors:
            by = By.XPATH if selector.startswith("//") else By.CSS_SELECTOR
            try:
                for node in driver.find_elements(by, selector):
                    if not node.is_displayed():
                        continue
                    title_text = node.text.strip()
                    if title_text:
                        print(f"  -> 找到用户标题: '{title_text}' (选择器: {selector})")
                        # Sanitize so the title is safe inside a file name.
                        return clean_filename_text(title_text)
            except Exception:
                continue

        print("  -> 未找到用户标题元素")
        return "unknown_user"

    except Exception as e:
        print(f"  -> 提取用户标题时出错: {e}")
        return "unknown_user"


def clean_filename_text(text):
    """
    Sanitize *text* so it can safely be used as part of a file name.

    Replaces characters that are invalid in Windows file names with
    underscores, collapses whitespace runs into single underscores,
    truncates to 50 characters, and strips leading/trailing underscores.

    Args:
        text: Raw text (e.g. a user name scraped from the page).

    Returns:
        str: Cleaned text, or "unknown_user" if nothing remains.
    """
    import re  # local import keeps this helper self-contained
    # Replace characters that Windows forbids in file names.
    cleaned = re.sub(r'[<>:"/\\|?*]', '_', text)
    # Collapse whitespace runs into single underscores.
    cleaned = re.sub(r'\s+', '_', cleaned)
    # Cap the length; slicing is already a no-op for short strings, so the
    # previous `if len(...) > 50` check was redundant.
    cleaned = cleaned[:50]
    # Drop leading/trailing underscores left over from the substitutions.
    cleaned = cleaned.strip('_')
    return cleaned if cleaned else "unknown_user"


def extract_urls_from_current_viewport(driver, base_url):
    """Collect explore/profile links that wrap an image from the current DOM.

    Args:
        driver: WebDriver instance.
        base_url: Site root used to absolutize relative hrefs.

    Returns:
        set[str]: Absolute, de-duplicated URLs.
    """
    collected = set()

    # Several selector strategies, from specific to broad.
    css_strategies = (
        "a[href*='/explore/']",
        "a[href*='/user/profile/']",
        "a[class*='cover']",
        "#userPostedFeeds a[href]",
    )

    for css in css_strategies:
        try:
            anchors = driver.find_elements(By.CSS_SELECTOR, css)
        except Exception:
            continue

        for anchor in anchors:
            try:
                href = anchor.get_attribute("href")
                if not href:
                    continue
                if "/explore/" not in href and "/user/profile/" not in href:
                    continue
                # Only keep links that actually wrap an image.
                if not anchor.find_elements(By.TAG_NAME, "img"):
                    continue

                # Absolutize the href.
                if href.startswith("/"):
                    collected.add(base_url + href)
                elif href.startswith("http"):
                    collected.add(href)
                else:
                    collected.add(base_url + "/" + href.lstrip("/"))
            except Exception:
                continue

    return collected


def perform_scroll_operation(driver, attempt_number):
    """Scroll to the bottom once and report whether the page grew taller.

    Args:
        driver: WebDriver instance.
        attempt_number: Index of this scroll round (kept for interface
            compatibility; not used internally).

    Returns:
        bool: True if new content extended the page height.
    """
    try:
        # Anti-ban bookkeeping; may pause if the scroll threshold is hit.
        increment_scroll_count()

        old_height = driver.execute_script("return document.body.scrollHeight")
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        sleep(2)
        new_height = driver.execute_script("return document.body.scrollHeight")

        if new_height <= old_height:
            print(f"    页面高度未变化 ({old_height}px)")
            return False

        print(f"    页面高度从 {old_height}px 增加到 {new_height}px")
        sleep(3)  # extra wait so newly loaded content finishes rendering
        return True

    except Exception as e:
        print(f"    滚动操作失败: {e}")
        return False


def scroll_and_load_content_legacy(driver, max_scrolls=15):
    """
    Scroll upward to load more content (Xiaohongshu loads older feeds when
    pulling up past the top). Stops early after 4 consecutive rounds that
    produce no new feed links.

    Args:
        driver: WebDriver instance
        max_scrolls: maximum number of scroll rounds
    """

    def _count_feed_links(fallback):
        """Count feed links, preferring the #userPostedFeeds container.

        Falls back to page-wide /explore/ links, then to *fallback*
        (the two original copies of this logic used different fallbacks:
        0 before the scroll, the previous count after it).
        """
        try:
            feeds_container = driver.find_element(By.CSS_SELECTOR, "#userPostedFeeds")
            return len(feeds_container.find_elements(By.CSS_SELECTOR, "a[href]"))
        except Exception:
            try:
                return len(driver.find_elements(By.CSS_SELECTOR, "a[href*='/explore/']"))
            except Exception:
                return fallback

    print(f"    -> 开始向上滚动加载更多内容（最多滚动{max_scrolls}次）...")

    scroll_count = 0
    no_new_content_count = 0
    last_feed_count = 0

    # Let the page settle before the first round.
    sleep(2)

    while scroll_count < max_scrolls:
        # Anti-ban bookkeeping; may pause when the scroll threshold is hit.
        increment_scroll_count()

        current_feed_count = _count_feed_links(0)

        print(f"    -> 第 {scroll_count + 1} 次滚动前，feeds数量: {current_feed_count}")

        # Xiaohongshu's pull-up-to-load-more mechanics, tried in sequence:
        # 1) jump to the very top of the page
        driver.execute_script("window.scrollTo(0, 0);")
        sleep(0.5)

        # 2) scroll past the top boundary (simulates mobile pull-to-refresh)
        driver.execute_script("window.scrollBy(0, -1000);")
        sleep(1)

        # 3) more aggressive repeated upward scrolls.
        # Fix: the JS string previously referenced `i`, which does not
        # exist in the browser context (the Python loop variable is not
        # interpolated) and raised a JavascriptException — pass the value
        # through arguments[0] instead.
        for i in range(5):
            driver.execute_script("window.scrollTo(0, arguments[0]);", -500 - i * 100)
            sleep(0.3)

        # 4) keyboard navigation (Home / ArrowUp / PageUp)
        try:
            body = driver.find_element(By.TAG_NAME, 'body')
            for _ in range(3):
                body.send_keys(Keys.HOME)
                sleep(0.2)
                body.send_keys(Keys.ARROW_UP)
                sleep(0.2)
                body.send_keys(Keys.PAGE_UP)
                sleep(0.2)
        except Exception:
            # Fix: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.
            pass

        # 5) drag upward with ActionChains (simulates a touch pull-up)
        try:
            actions = ActionChains(driver)
            actions.move_by_offset(800, 400)  # move toward the page center
            actions.click_and_hold()
            actions.move_by_offset(0, -300)  # drag up 300px
            actions.release()
            actions.perform()
            sleep(1)
        except Exception:
            pass

        # Return to the top and wait for new content to load.
        driver.execute_script("window.scrollTo(0, 0);")
        sleep(3)

        # Did the round produce new feeds?
        new_feed_count = _count_feed_links(current_feed_count)

        print(f"    -> 第 {scroll_count + 1} 次滚动后，feeds数量: {new_feed_count}")

        if new_feed_count > current_feed_count:
            print(f"    -> ✓ 检测到新内容加载！数量从 {current_feed_count} 增加到 {new_feed_count}")
            no_new_content_count = 0
            last_feed_count = new_feed_count
        else:
            no_new_content_count += 1
            print(f"    -> ✗ 未检测到新内容 ({no_new_content_count}/4)")

            # Four consecutive empty rounds: assume everything is loaded.
            if no_new_content_count >= 4:
                print(f"    -> 连续 {no_new_content_count} 次未检测到新内容，可能已加载完所有feeds")
                break

        scroll_count += 1

    # Finish at the top of the page.
    driver.execute_script("window.scrollTo(0, 0);")
    sleep(1)

    print(f"    -> 向上滚动完成！总计滚动 {scroll_count} 次，最终feeds数量: {last_feed_count}")

    # Extra settle time so everything is rendered before the caller scrapes.
    sleep(2)


def simple_xiaohongshu_scroll(driver, max_attempts=50):
    """
    Simplified Xiaohongshu scroll loop focused on the most reliable gesture.

    Stops early when the item count stops changing: either five identical
    consecutive counts in the rolling history, or five consecutive rounds
    without progress.

    Args:
        driver: WebDriver instance.
        max_attempts: Upper bound on scroll attempts (default 50).

    Returns:
        int: Final number of feed items counted on the page.
    """
    print(f"    -> 使用简化版滚动方法加载更多内容（最多{max_attempts}次）...")

    stall_limit = 5     # stop after this many consecutive no-progress rounds
    stalls = 0          # consecutive rounds without new content
    best_count = 0      # highest item count reached by a successful scroll
    history = []        # rolling window (max 10) of recent item counts
    tries = 0

    baseline = count_current_feeds(driver)
    print(f"    -> 初始内容数量: {baseline}")

    while tries < max_attempts:
        before = count_current_feeds(driver)
        # Append and keep only the 10 most recent counts.
        history = (history + [before])[-10:]

        print(f"    -> 尝试 {tries + 1}/{max_attempts}，当前内容数量: {before}")

        # Five identical counts in a row means the page is exhausted.
        if len(history) >= 5 and len(set(history[-5:])) == 1:
            print(f"    -> 检测到最近5次滚动内容数量完全没有变化 ({history[-5:]})")
            print(f"    -> 可能已经加载完所有内容，提前停止滚动")
            break

        if perform_single_scroll(driver):
            after = count_current_feeds(driver)
            print(f"    -> 滚动后内容数量: {after}")

            if after > before:
                print(f"    -> ✓ 成功加载新内容！从 {before} 增加到 {after} (+{after - before})")
                stalls = 0
                best_count = after
                # A large batch may still be rendering; give it extra time.
                if after - before > 10:
                    print(f"    -> 本次加载了较多内容({after - before}个)，额外等待2秒...")
                    sleep(2)
            else:
                stalls += 1
                print(f"    -> ✗ 内容未增加 ({stalls}/{stall_limit})")
        else:
            stalls += 1
            print(f"    -> 滚动操作失败 ({stalls}/{stall_limit})")

        if stalls >= stall_limit:
            print(f"    -> 连续{stall_limit}次未加载到新内容，停止滚动")
            break

        tries += 1

        # Progress report every 10 rounds.
        if tries % 10 == 0:
            gained = best_count - baseline if best_count > baseline else 0
            print(f"    -> 进度统计: 已滚动{tries}次，总计加载了{gained}个新内容")

    final_count = count_current_feeds(driver)
    gained = final_count - baseline if final_count > baseline else 0

    print(f"    -> 简化版滚动完成！")
    print(f"    -> 初始内容: {baseline}")
    print(f"    -> 最终内容: {final_count}")
    print(f"    -> 总计加载: {gained} 个新内容")
    print(f"    -> 实际滚动: {tries} 次")

    return final_count


def perform_single_scroll(driver):
    """
    Run one pull-to-refresh style scroll gesture.

    Args:
        driver: WebDriver instance.

    Returns:
        bool: True when every scroll step executed, False on any error.
    """
    try:
        # Count this scroll toward the anti-ban rest threshold.
        increment_scroll_count()

        # Jump to the very top of the page.
        driver.execute_script("window.scrollTo(0, 0);")
        sleep(0.5)

        # Over-scroll upward to mimic a pull-to-refresh gesture.
        driver.execute_script("window.scrollBy(0, -800);")
        sleep(1)

        # Settle back at the top and give new content time to load.
        driver.execute_script("window.scrollTo(0, 0);")
        sleep(2)
    except Exception as e:
        print(f"    -> 滚动操作失败: {e}")
        return False

    return True


def count_current_feeds(driver):
    """
    Count the feed items currently present on the page.

    Prefers anchors inside the #userPostedFeeds container, trying selectors
    from most to least specific; falls back to counting /explore/ links
    across the whole page when the container cannot be found.

    Args:
        driver: WebDriver instance.

    Returns:
        int: Number of feed items found (0 when none can be located).
    """
    try:
        # Prefer counting inside the userPostedFeeds container.
        container = driver.find_element(By.CSS_SELECTOR, "#userPostedFeeds")
        # Try several selectors, most specific first.
        selectors = [
            "a[class*='cover']",
            "a[href*='/explore/']",
            "a[href*='/user/profile/']",
            "a[href]"
        ]

        for selector in selectors:
            items = container.find_elements(By.CSS_SELECTOR, selector)
            if items:
                return len(items)

        return 0
    except Exception:
        # Container missing — fall back to scanning the whole page.
        # (Narrowed from bare `except:` so Ctrl-C still interrupts.)
        try:
            items = driver.find_elements(By.CSS_SELECTOR, "a[href*='/explore/']")
            return len(items)
        except Exception:
            return 0
    # NOTE: an unreachable leftover body of a deleted scrolling helper
    # (it referenced an undefined `max_scrolls`) was removed from here.


def extract_image_links_from_container(driver, container):
    """
    Collect the click-through links for every image inside the
    userPostedFeeds container.

    Matches anchors shaped like
    <a data-v-... class="cover mask ld" target="_self" href="/user/profile/...">.

    Args:
        driver: WebDriver instance (kept for interface compatibility).
        container: userPostedFeeds container element.

    Returns:
        list: Absolute click-through URLs, in discovery order, de-duplicated.
    """
    found = []
    seen = set()
    site_root = "https://www.xiaohongshu.com"

    # Selectors ordered from the known element signature down to the most
    # generic fallback; the first selector that yields usable links wins.
    candidate_selectors = [
        "a.cover[target='_self']",
        "a[class*='cover'][target='_self']",
        "a.cover.mask",
        "a[class*='cover'][class*='mask']",
        "a[href*='/user/profile/']",
        "a.cover",
        "a[target='_self']",
        "a[href]"
    ]

    print(f"    -> 尝试在userPostedFeeds容器中查找图片链接...")

    for selector in candidate_selectors:
        try:
            anchors = container.find_elements(By.CSS_SELECTOR, selector)
            print(f"    -> 选择器 '{selector}' 找到 {len(anchors)} 个链接")

            for anchor in anchors:
                try:
                    href = anchor.get_attribute("href")
                    if not href:
                        continue

                    # Only keep anchors that actually wrap an image.
                    if not anchor.find_elements(By.TAG_NAME, "img"):
                        continue

                    # Restrict to profile / explore destinations.
                    if "/user/profile/" not in href and "/explore/" not in href:
                        continue

                    # Build an absolute URL.
                    if href.startswith("http"):
                        absolute = href
                    elif href.startswith("/"):
                        absolute = site_root + href
                    else:
                        absolute = site_root + "/" + href.lstrip("/")

                    # Skip duplicates.
                    if absolute in seen:
                        continue
                    seen.add(absolute)
                    found.append(absolute)

                    # Extra attributes are logged for debugging only.
                    print(f"    -> {len(found)}. {absolute}")
                    print(f"         class: {anchor.get_attribute('class')}, target: {anchor.get_attribute('target')}")

                except Exception as e:
                    print(f"    -> 处理链接时出错: {e}")
                    continue

            # Stop at the first selector that produced results.
            if found:
                print(f"    -> 使用选择器 '{selector}' 成功找到 {len(found)} 个链接")
                break

        except Exception as e:
            print(f"    -> 选择器 '{selector}' 执行失败: {e}")
            continue

    print(f"    -> 总共提取到 {len(found)} 个唯一的图片链接")
    return found


def get_redirected_urls(driver, url_list, max_wait_per_url=10):
    """
    Visit each URL and record the address it redirects to.

    Args:
        driver: WebDriver instance used to load the pages.
        url_list: Original URLs to resolve.
        max_wait_per_url: Maximum wait per URL in seconds.
            NOTE(review): currently unused — a fixed 2s sleep is used
            instead; kept for interface compatibility.

    Returns:
        list: Unique resolved URLs, in the order they were first seen.
            A URL that fails to load is kept in its original form.
    """
    redirected_urls = []
    processed_urls = set()

    print(f"\n开始获取 {len(url_list)} 个URL的重定向地址...")

    for i, url in enumerate(url_list):
        try:
            print(f"  -> 处理第 {i + 1}/{len(url_list)} 个URL...")
            print(f"     原始URL: {url}")

            # Load the page and let any client-side redirects settle.
            driver.get(url)
            sleep(2)

            redirected_url = driver.current_url
            print(f"     重定向后: {redirected_url}")

            # Record each resolved URL only once.
            if redirected_url not in processed_urls:
                processed_urls.add(redirected_url)
                redirected_urls.append(redirected_url)
                print(f"     ✓ 已记录重定向URL")
            else:
                print(f"     - 重复URL，跳过")

        except Exception as e:
            print(f"     ✗ 获取重定向URL失败: {e}")
            # On failure, fall back to keeping the original URL.
            if url not in processed_urls:
                processed_urls.add(url)
                redirected_urls.append(url)
                print(f"     ✓ 保留原始URL")
            continue

    print(f"\n重定向处理完成:")
    print(f"  - 原始URL数量: {len(url_list)}")
    print(f"  - 重定向后URL数量: {len(redirected_urls)}")
    print(f"  - 去重后数量: {len(set(redirected_urls))}")

    # Bug fix: redirected_urls is already de-duplicated via processed_urls,
    # so the old `list(set(...))` pass was redundant and scrambled the order,
    # making downstream output files non-deterministic. Preserve order.
    return redirected_urls


def save_single_url_result(url_result, url_index, total_urls, driver=None):
    """
    Persist one URL's results: a redirected-URL JSON file plus a
    space-joined URL string TXT file.

    Args:
        url_result: Result dict for one processed URL (reads keys
            'user_title', 'image_links', 'url').
        url_index: 1-based index of this URL within the batch.
        total_urls: Total number of URLs in the batch.
        driver: WebDriver instance; required to resolve redirected URLs.
            When None (or when there are no image links), no files are
            written and (None, None, 0) is returned.

    Returns:
        tuple: (redirected JSON path or None, concat TXT path or None,
            number of redirected URLs saved)
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    # Pull the fields we persist; fall back to placeholders when missing.
    user_title = url_result.get('user_title', 'unknown_user')
    image_links = url_result.get('image_links', [])
    original_url = url_result.get('url', 'unknown_url')

    # Ensure the output directory exists. exist_ok avoids the
    # check-then-create race of the old os.path.exists pattern.
    output_dir = "../output"
    os.makedirs(output_dir, exist_ok=True)

    # File names embed the user, the batch position and a timestamp.
    base_filename = f"xiaohongshu_urls_{user_title}_url{url_index}of{total_urls}_{timestamp}"
    redirected_filename = f"{base_filename}_redirected.json"
    concat_filename = f"{base_filename}_urls.txt"

    redirected_filepath = os.path.join(output_dir, redirected_filename)
    concat_filepath = os.path.join(output_dir, concat_filename)

    print(f"  -> 正在保存第 {url_index}/{total_urls} 个URL的结果...")
    print(f"     源URL: {original_url}")
    print(f"     用户: {user_title}")
    print(f"     图片数量: {len(image_links)}")

    redirected_urls = []
    if image_links and driver:
        print(f"     -> 正在获取 {len(image_links)} 个URL的重定向地址...")
        redirected_urls = get_redirected_urls(driver, image_links)

        # Write the redirected-URL JSON file with batch metadata.
        redirected_data = {
            "source_url": original_url,
            "user_title": user_title,
            "extraction_timestamp": timestamp,
            "url_index": f"{url_index}/{total_urls}",
            "original_count": len(image_links),
            "redirected_count": len(redirected_urls),
            "image_urls": redirected_urls
        }

        with open(redirected_filepath, 'w', encoding='utf-8') as f:
            json.dump(redirected_data, f, ensure_ascii=False, indent=2)
        print(f"     ✓ 重定向URL文件: {redirected_filename}")

        # Count this redirect file toward the anti-ban rest threshold.
        increment_redirect_count()

        # Write the space-joined URL string file, if any URLs resolved.
        if redirected_urls:
            url_concat_string = ' '.join(redirected_urls)

            with open(concat_filepath, 'w', encoding='utf-8') as f:
                f.write(url_concat_string)
            print(f"     ✓ URL拼接字符串文件: {concat_filename}")
            print(f"     ✓ 拼接字符串长度: {len(url_concat_string)} 字符")
        else:
            concat_filepath = None
            print(f"     - 无重定向URL，跳过拼接字符串文件")
    else:
        redirected_filepath = None
        concat_filepath = None
        print(f"     - 无图片链接，跳过文件生成")

    return redirected_filepath, concat_filepath, len(redirected_urls)


def save_results_to_file(results, filename=None, get_redirected=True):
    """
    Save processing results to a JSON file containing only the URL array.

    The auto-generated filename embeds the user title extracted from the
    page, a "redirected"/"original" suffix and a timestamp.

    Args:
        results: Aggregate results dict (reads 'processed_urls').
        filename: Output filename; auto-generated when falsy.
        get_redirected: Whether redirected URLs were requested.
            NOTE(review): redirect resolution needs the shared driver and
            is not performed here — the collected URLs are saved as-is
            either way.

    Returns:
        tuple: (path of the written file, list of URLs saved)
    """
    if not filename:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        suffix = "redirected" if get_redirected else "original"

        # Use the first successfully extracted user title for the filename.
        user_title = "unknown_user"
        for processed_url in results.get('processed_urls', []):
            if processed_url.get('status') == 'success' and processed_url.get('result'):
                extracted_title = processed_url['result'].get('user_title', '')
                if extracted_title and extracted_title != "unknown_user":
                    user_title = extracted_title
                    break

        filename = f"xiaohongshu_urls_{user_title}_{suffix}_{timestamp}.json"
        # Bug fix: the old message printed the literal text "(unknown)"
        # instead of the generated filename.
        print(f"  -> 构建文件名: {filename}")

    output_dir = "../output"
    # exist_ok avoids the check-then-create race of os.path.exists().
    os.makedirs(output_dir, exist_ok=True)

    filepath = os.path.join(output_dir, filename)

    # Collect image links from every successfully processed URL.
    all_urls = []
    for processed_url in results.get('processed_urls', []):
        if processed_url.get('status') == 'success' and processed_url.get('result'):
            all_urls.extend(processed_url['result'].get('image_links', []))

    # De-duplicate while preserving first-seen order; the previous
    # list(set(...)) scrambled the saved order non-deterministically.
    unique_urls = list(dict.fromkeys(all_urls))

    if get_redirected and unique_urls:
        print(f"\n准备获取 {len(unique_urls)} 个URL的重定向地址...")
        # Redirect resolution requires the shared driver instance, which is
        # not available here — save the collected URLs for now.
        final_urls = unique_urls
    else:
        final_urls = unique_urls

    # Persist the plain URL array.
    with open(filepath, 'w', encoding='utf-8') as f:
        json.dump(final_urls, f, ensure_ascii=False, indent=2)

    print(f"URL列表已保存到: {filepath}")
    print(f"总共保存了 {len(final_urls)} 个唯一URL")
    return filepath, final_urls


if __name__ == '__main__':
    # Example Xiaohongshu profile URLs to process.
    url_list = [
        "https://www.xiaohongshu.com/user/profile/5671f05eb8ce1a75e255de79?xsec_token=ABUKUZVEC0Lp3MjL__8no6OI0LqyY8x0CtfE5WojNP3kc=&xsec_source=pc_search",
        # More URLs can be appended here.
    ]

    print("=== 小红书URL批量处理工具 ===")
    print(f"准备处理 {len(url_list)} 个URL")
    print("本工具将:")
    print("1. 检测登录状态")
    print("2. 如需登录会提示手动扫码")
    print("3. 登录完成后批量处理URL列表")
    print("4. 提取图片链接")
    print("5. 为每个URL生成重定向URL的JSON文件")
    print("6. 为每个URL生成URL拼接字符串的TXT文件")
    print()
    print("=== 防封号机制 ===")
    print(f"- 每处理 {REST_THRESHOLD} 个重定向文件后自动休息 {REST_DURATION // 60} 分钟")
    print(f"- 每滚动 {SCROLL_REST_THRESHOLD} 次后自动休息 {REST_DURATION // 60} 分钟")
    print("- 避免因频繁请求导致封号或封IP")
    print("- 休息期间会显示倒计时提示")
    print()

    # Kick off the batch run; returns aggregate results and the driver.
    print("开始处理URL列表...")
    results, driver = process_xiaohongshu_urls(url_list)

    try:
        print(f"\n=== 处理完成摘要 ===")
        successful_count = len([r for r in results.get('processed_urls', []) if r['status'] == 'success'])
        failed_count = len([r for r in results.get('processed_urls', []) if r['status'] == 'error'])

        print(f"成功处理: {successful_count} 个URL")
        print(f"处理失败: {failed_count} 个URL")
        print(f"每个成功处理的URL都已生成:")
        print(f"  - 重定向URL的JSON文件")
        print(f"  - URL拼接字符串的TXT文件")
        print(f"文件保存位置: ../output/ 目录")
        # Bug fix: the old f-string used "\\n", which printed a literal
        # backslash-n instead of a newline.
        print(f"\n防封号统计:")
        print(f"  - 当前重定向文件计数: {REDIRECT_FILE_COUNT}")
        print(f"  - 重定向文件休息阈值: {REST_THRESHOLD} 个文件")
        print(f"  - 当前滚动次数计数: {SCROLL_COUNT}")
        print(f"  - 滚动次数休息阈值: {SCROLL_REST_THRESHOLD} 次滚动")
        if LAST_REST_TIME > 0:
            # datetime is already imported at module level; the redundant
            # local `from datetime import datetime` was removed.
            last_rest_str = datetime.fromtimestamp(LAST_REST_TIME).strftime('%H:%M:%S')
            print(f"  - 最后休息时间: {last_rest_str}")
        else:
            print(f"  - 本次运行未触发休息机制")

        if results['errors']:
            print(f"\n错误详情:")
            for i, error in enumerate(results['errors'], 1):
                print(f"  {i}. {error}")

    finally:
        # Always close the browser, even if the summary above raised.
        print("\n处理完成，浏览器将在1秒后关闭...")
        sleep(1)
        driver.quit()

    # Final end-of-run summary.
    print("\n=== 最终处理结果摘要 ===")
    print(f"总URL数量: {results['total_urls']}")
    print(f"需要登录: {'是' if results['login_required'] else '否'}")
    print(f"登录完成: {'是' if results['login_completed'] else '否'}")
    print(f"成功处理: {len([r for r in results['processed_urls'] if r['status'] == 'success'])}")
    print(f"处理失败: {len([r for r in results['processed_urls'] if r['status'] == 'error'])}")
    print(f"错误数量: {len(results['errors'])}")

    if results['errors']:
        print("\n错误详情:")
        for i, error in enumerate(results['errors'], 1):
            print(f"  {i}. {error}")

    print("\n处理完成！每个URL都已生成JSON和TXT文件。")
