#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
文章爬取和作者信息提取模块
基于other/main.py中的step crawl逻辑重写
"""

import os
import sqlite3
import json
import time
import random
import logging
import traceback
import base64
import urllib.parse
import re
from typing import Dict, List, Optional, Tuple, Any
from tqdm import tqdm
from selenium import webdriver
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, WebDriverException
from webdriver_manager.chrome import ChromeDriverManager
import anthropic
import httpx
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Logging configuration: INFO level, mirrored to a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('crawl_log.txt', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


# TODO: replace this with your real proxy server address and port.
# Example: PROXY = "http://user:pass@127.0.0.1:7890" or "socks5://127.0.0.1:1080"
# Testing showed ScienceDirect is reachable without a proxy.
#PROXY = "http://36.147.78.166:80"
PROXY = None  # No-proxy mode - verified to work by testing

class ArticleCrawler:
    def __init__(self, db_path='articles.db'):
        """Initialize the article crawler.

        Args:
            db_path: Path to the SQLite database file.
        """
        self.db_path = db_path
        self.logger = logger

        # Browser profiles loaded from browser_profiles.json (may be empty).
        self.browser_profiles = self._load_browser_profiles()

        # Fallback User-Agent pool, used when no profile file could be loaded.
        self.fallback_user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        ]

        # Round-robin cursor into self.browser_profiles.
        self.current_profile_index = 0

        # Request-pacing settings (anti-bot strategy borrowed from scihub-cn).
        self.min_delay = 2.0        # minimum delay between requests (seconds)
        self.max_delay = 8.0        # maximum delay between requests (seconds)
        self.last_request_time = 0  # timestamp of the previous request

        # Build the HTTP session and its default headers.
        self._configure_network_session()

        # Attach a retry policy for transient HTTP failures.
        retry_policy = Retry(
            total=3,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["HEAD", "GET", "OPTIONS"],
            backoff_factor=1,
        )
        adapter = HTTPAdapter(max_retries=retry_policy)
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

    def _load_browser_profiles(self):
        """Load browser profiles from browser_profiles.json next to this module.

        Returns:
            list: The profile dicts, or an empty list when the file is
            missing, contains no profiles, or cannot be parsed.
        """
        try:
            profiles_path = os.path.join(os.path.dirname(__file__), 'browser_profiles.json')
            if not os.path.exists(profiles_path):
                logger.warning("浏览器配置文件不存在，使用备用配置")
                return []
            with open(profiles_path, 'r', encoding='utf-8') as f:
                profiles = json.load(f).get('profiles', [])
            if not profiles:
                logger.warning("浏览器配置文件为空，使用备用配置")
                return []
            logger.info(f"成功加载 {len(profiles)} 个浏览器配置文件")
            return profiles
        except Exception as e:
            logger.error(f"加载浏览器配置文件失败: {e}")
            return []

    def get_next_browser_profile(self):
        """Return the next browser profile, rotating round-robin.

        When no profile file was loaded, a synthetic "fallback" profile with
        a randomly chosen User-Agent and no cookies is returned instead.
        """
        if not self.browser_profiles:
            # No profile file available: fall back to a random User-Agent.
            return {
                'name': 'fallback',
                'user_agent': random.choice(self.fallback_user_agents),
                'cookies': ''
            }

        idx = self.current_profile_index
        self.current_profile_index = (idx + 1) % len(self.browser_profiles)
        profile = self.browser_profiles[idx]

        logger.info(f"使用浏览器配置: {profile['name']}")
        return profile

    def _configure_network_session(self):
        """Create a fresh requests session tuned for scraping.

        Disables environment proxies and TLS verification, then installs a
        browser-like header set built from the current browser profile.
        """
        self.session = requests.Session()

        # Ignore any system-level proxy configuration to avoid interference.
        self.session.trust_env = False
        self.session.proxies = {}

        # Skip TLS certificate verification for this session.
        self.session.verify = False

        # Pick the next browser profile for the User-Agent header.
        profile = self.get_next_browser_profile()

        default_headers = {
            'User-Agent': profile['user_agent'],
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'DNT': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"'
        }
        self.session.headers.update(default_headers)
    def fetch_article_html_enhanced(self, url: str) -> Optional[webdriver.Chrome]:
        """Open an article page with Selenium and return the live WebDriver.

        The caller owns the returned driver (and must quit() it), which
        allows further interaction with the loaded page.

        Args:
            url: Article URL to open.

        Returns:
            A WebDriver with the page loaded, or None if every retry failed.
        """
        logger.info(f"尝试使用 Selenium 获取: {url}")
        max_retries = 3
        for i in range(max_retries):
            driver = None
            try:
                # Set up the WebDriver (no-proxy mode by default).
                driver = self._setup_webdriver()
                # BUG FIX: _setup_webdriver returns None when no usable
                # ChromeDriver could be located; treat that as a retriable
                # failure instead of dereferencing None.
                if driver is None:
                    raise WebDriverException("WebDriver initialization failed")

                # Pace requests so we are less likely to be flagged as a bot.
                self._apply_intelligent_delay()

                driver.get(url)

                # Wait until the page is actually usable.
                if not self._wait_for_page_load(driver, url):
                    logger.error(f"页面加载失败: {url}")
                    raise WebDriverException("Page load failed")

                logger.info(f"成功访问页面: {url}")
                return driver  # hand the live WebDriver back to the caller

            except (WebDriverException, TimeoutException) as e:
                error_msg = str(e).lower()

                # Tailor logging and backoff to the failure mode.
                if "cloudflare" in error_msg or "challenge" in error_msg:
                    logger.warning(f"Cloudflare拦截 (尝试 {i+1}/{max_retries}): {url}")
                    # Blocked by Cloudflare: back off longer before retrying.
                    time.sleep(random.uniform(10, 20))
                elif "403" in error_msg or "access denied" in error_msg:
                    logger.warning(f"访问被拒绝 (尝试 {i+1}/{max_retries}): {url}")
                    # Access denied; a proxy change may be needed.
                elif "timeout" in error_msg:
                    logger.warning(f"请求超时 (尝试 {i+1}/{max_retries}): {url}")
                    # Timed out: wait a bit before retrying.
                    time.sleep(random.uniform(5, 10))
                elif "connection" in error_msg:
                    logger.warning(f"连接错误 (尝试 {i+1}/{max_retries}): {url}")
                    # Connection error; possibly a proxy problem.
                else:
                    logger.warning(f"未知错误 (尝试 {i+1}/{max_retries}): {e}")

                # Dispose of the failed WebDriver, ignoring cleanup errors.
                if driver:
                    try:
                        driver.quit()
                    except Exception:
                        pass

                if i == max_retries - 1:
                    logger.error(f"所有重试均失败，放弃访问: {url}")
                    break

                # Randomized pause before the next attempt.
                retry_delay = random.uniform(3, 8)
                logger.info(f"重试前等待 {retry_delay:.1f} 秒...")
                time.sleep(retry_delay)

            except Exception as e:
                logger.error(f"意外错误 (尝试 {i+1}/{max_retries}): {e}")
                if driver:
                    try:
                        driver.quit()
                    except Exception:
                        pass

                # Unexpected errors are retried as well.
                if i < max_retries - 1:
                    time.sleep(random.uniform(5, 10))
                else:
                    break

        logger.error(f"无法成功访问 {url}")
        return None

    def _setup_webdriver(self) -> Optional[webdriver.Chrome]:
        """Create a configured headless Chrome WebDriver.

        Applies anti-detection flags, a rotated User-Agent, optional proxy
        settings, then resolves a ChromeDriver binary via webdriver-manager,
        the system PATH, or a local ./chromedriver.exe, in that order.

        Returns:
            A ready WebDriver, or None when no ChromeDriver could be found.
        """
        options = Options()
        
        # Basic headless / sandbox settings.
        options.add_argument("--headless")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")
        options.add_argument("--disable-gpu")
        options.add_argument("--log-level=3")
        options.add_argument("--silent")
        
        # Anti-detection settings - strategy borrowed from scihub-cn.
        options.add_argument("--disable-blink-features=AutomationControlled")
        options.add_experimental_option("excludeSwitches", ["enable-automation"])
        options.add_experimental_option('useAutomationExtension', False)
        
        # More realistic browser disguise - rotate through browser profiles.
        current_profile = self.get_next_browser_profile()
        options.add_argument(f"user-agent={current_profile['user_agent']}")
        logger.info(f"WebDriver使用浏览器配置: {current_profile['name']}")
        
        # Cookies (if any) are applied later by _set_cookies.
        self.current_cookies = current_profile.get('cookies', '')
        
        # Browser window settings.
        options.add_argument("--window-size=1920,1080")
        options.add_argument("--start-maximized")
        
        # Language and locale settings.
        options.add_argument("--lang=en-US,en;q=0.9")
        options.add_argument("--accept-language=en-US,en;q=0.9")
        
        # Security and privacy settings.
        options.add_argument("--disable-infobars")
        options.add_argument("--disable-popup-blocking")
        options.add_argument("--disable-notifications")
        options.add_argument("--disable-web-security")
        options.add_argument("--allow-running-insecure-content")
        options.add_argument("--accept-insecure-certs")
        options.add_argument("--ignore-certificate-errors")
        options.add_argument("--ignore-ssl-errors")
        
        # Performance tuning.
        options.add_argument("--disable-extensions")
        options.add_argument("--disable-plugins")
        options.add_argument("--disable-images")  # skip image loading for speed
        # NOTE: ScienceDirect needs JavaScript, so JS stays enabled.
        
        # Proxy settings.
        if PROXY:
            options.add_argument(f'--proxy-server={PROXY}')
            # Extra proxy-related flags.
            options.add_argument('--proxy-bypass-list=<-loopback>')
            options.add_argument('--disable-web-security')
            options.add_argument('--allow-running-insecure-content')
            options.add_argument('--ignore-certificate-errors')
            options.add_argument('--ignore-ssl-errors')
            options.add_argument('--ignore-certificate-errors-spki-list')
            logger.info(f"WebDriver 使用代理: {PROXY}")
        else:
            logger.info("WebDriver 运行在无代理模式")

        # Let WebDriver-Manager manage the ChromeDriver binary automatically.
        try:
            logger.info("尝试使用 WebDriver Manager 自动安装/更新 ChromeDriver...")
            service = ChromeService(ChromeDriverManager().install())
            logger.info("WebDriver Manager 初始化成功。")
        except Exception as e:
            logger.warning(f"WebDriver Manager 初始化失败: {e}")
            logger.info("尝试使用系统默认路径的 ChromeDriver...")
            try:
                # Fallback 1: ChromeDriver from the system PATH.
                service = ChromeService()
                logger.info("成功使用系统默认路径的 ChromeDriver。")
            except Exception as e2:
                logger.warning(f"无法从系统默认路径初始化 ChromeService: {e2}")
                logger.info("尝试使用本地路径 './chromedriver.exe'...")
                try:
                    # Fallback 2: a chromedriver.exe next to the script.
                    local_chromedriver_path = './chromedriver.exe'
                    if os.path.exists(local_chromedriver_path):
                        service = ChromeService(executable_path=local_chromedriver_path)
                        logger.info("成功使用本地路径的 ChromeDriver。")
                    else:
                        logger.error("在本地路径未找到 chromedriver.exe，Selenium 初始化失败。")
                        return None
                except Exception as e3:
                    logger.error(f"使用本地路径 ChromeDriver 失败: {e3}")
                    return None
    
        logger.info("====== WebDriver manager ======")
        driver = webdriver.Chrome(service=service, options=options)
        
        # Run anti-detection scripts to hide webdriver traits.
        self._execute_stealth_scripts(driver)
        
        # Apply cookies from the current profile (if any).
        self._set_cookies(driver)
        
        return driver

    def _execute_stealth_scripts(self, driver: webdriver.Chrome):
        """
        Run a batch of JavaScript snippets that mask common Selenium
        automation fingerprints (navigator.webdriver, plugins, languages,
        screen size, window.chrome, permission queries).
        """
        stealth_snippets = (
            # Hide the navigator.webdriver flag.
            "Object.defineProperty(navigator, 'webdriver', {get: () => undefined})",
            # Fake a non-empty plugin list.
            """
                Object.defineProperty(navigator, 'plugins', {
                    get: () => [1, 2, 3, 4, 5]
                });
            """,
            # Report browser-typical languages.
            """
                Object.defineProperty(navigator, 'languages', {
                    get: () => ['en-US', 'en']
                });
            """,
            # Report a common desktop screen size.
            """
                Object.defineProperty(screen, 'width', {get: () => 1920});
                Object.defineProperty(screen, 'height', {get: () => 1080});
            """,
            # Provide a Chrome runtime object like a real browser.
            """
                window.chrome = {
                    runtime: {}
                };
            """,
            # Patch permission queries for notifications.
            """
                const originalQuery = window.navigator.permissions.query;
                return window.navigator.permissions.query = (parameters) => (
                    parameters.name === 'notifications' ?
                        Promise.resolve({ state: Notification.permission }) :
                        originalQuery(parameters)
                );
            """,
        )
        try:
            for snippet in stealth_snippets:
                driver.execute_script(snippet)
            logger.debug("反检测脚本执行完成")
        except Exception as e:
            logger.warning(f"执行反检测脚本失败: {e}")

    def _set_cookies(self, driver: webdriver.Chrome):
        """Inject the current profile's cookie string into the WebDriver."""
        try:
            cookie_header = getattr(self, 'current_cookies', '')
            if not cookie_header:
                return

            # A page on the target domain must be open before cookies can be
            # attached to that domain.
            driver.get("https://www.sciencedirect.com")
            time.sleep(1)

            # Parse the "name=value; name=value" cookie string.
            pairs = cookie_header.split('; ')
            for pair in pairs:
                if '=' not in pair:
                    continue
                name, value = pair.split('=', 1)
                try:
                    driver.add_cookie({
                        'name': name.strip(),
                        'value': value.strip(),
                        'domain': '.sciencedirect.com'
                    })
                except Exception as e:
                    logger.debug(f"设置cookie失败 {name}: {e}")

            logger.info(f"成功设置 {len(pairs)} 个cookie")
        except Exception as e:
            logger.warning(f"设置cookie失败: {e}")

    def _prepare_cookie_dict(self, cookie: dict) -> dict:
        """
        Normalize a raw cookie dict into the shape Selenium accepts.

        Copies the mandatory fields, carries over the boolean flags when
        present, keeps only the sameSite values Selenium understands, and
        converts an expiration timestamp into an integer 'expiry'.
        """
        prepared = {
            'name': cookie['name'],
            'value': cookie['value'],
            'domain': cookie['domain'],
            'path': cookie.get('path', '/'),
        }

        # Optional boolean flags are copied only when genuinely boolean.
        for flag in ('secure', 'httpOnly'):
            if isinstance(cookie.get(flag), bool):
                prepared[flag] = cookie[flag]

        # Selenium does not support 'no_restriction'; only pass through the
        # values it understands.
        if cookie.get('sameSite') in ('strict', 'lax'):
            prepared['sameSite'] = cookie['sameSite']

        # Convert the Unix expiration timestamp to an integer 'expiry'.
        expiration = cookie.get('expirationDate')
        if expiration:
            try:
                prepared['expiry'] = int(float(expiration))
            except (ValueError, TypeError):
                pass  # ignore malformed timestamps

        return prepared

    def _apply_intelligent_delay(self):
        """
        Sleep just long enough to keep a randomized minimum gap between
        requests (anti-detection strategy based on scihub-cn), then record
        the current time as the new reference point.
        """
        # NOTE: the previous version re-imported `random` and `time` locally,
        # shadowing the module-level imports; the redundant imports were removed.
        current_time = time.time()

        # Enforce the gap only when a previous request has been recorded.
        if self.last_request_time > 0:
            elapsed = current_time - self.last_request_time

            # Draw a fresh random minimum interval each time so the pacing
            # does not look mechanical.
            min_interval = random.uniform(self.min_delay, self.max_delay)
            if elapsed < min_interval:
                wait_time = min_interval - elapsed
                logger.info(f"应用智能延迟: {wait_time:.2f}秒")
                time.sleep(wait_time)

        # Record when this request actually goes out.
        self.last_request_time = time.time()

    def _simulate_human_behavior(self, driver):
        """
        Imitate a human visitor (scrolling, mouse movement, pauses, element
        inspection) to lower the chance of bot detection.
        """
        try:
            # 1. Scroll to a few randomly chosen positions.
            scroll_scripts = (
                "window.scrollTo(0, 100);",
                "window.scrollTo(0, 300);",
                "window.scrollTo(0, 500);",
                "window.scrollTo(0, document.body.scrollHeight/4);",
                "window.scrollTo(0, document.body.scrollHeight/2);",
                "window.scrollTo(0, 0);"
            )
            for _ in range(random.randint(2, 4)):
                driver.execute_script(random.choice(scroll_scripts))
                time.sleep(random.uniform(1, 3))

            # 2. Wiggle the mouse around the viewport.
            try:
                from selenium.webdriver.common.action_chains import ActionChains
                chains = ActionChains(driver)

                size = driver.get_window_size()
                for _ in range(random.randint(1, 3)):
                    dx = random.randint(100, size['width'] - 100)
                    dy = random.randint(100, size['height'] - 100)
                    chains.move_by_offset(dx, dy).perform()
                    time.sleep(random.uniform(0.5, 1.5))

            except Exception as e:
                logger.debug(f"鼠标移动模拟失败: {e}")

            # 3. Idle for a moment.
            time.sleep(random.uniform(2, 5))

            # 4. "Look at" a few common page elements.
            try:
                common_selectors = ['h1', 'h2', 'title', 'body', 'main', 'article']
                for selector in random.sample(common_selectors, min(3, len(common_selectors))):
                    try:
                        if driver.find_elements(By.CSS_SELECTOR, selector):
                            # Pretend to read the element for a moment.
                            time.sleep(random.uniform(0.5, 1))
                    except Exception:
                        pass
            except Exception as e:
                logger.debug(f"元素检查模拟失败: {e}")

            logger.info("用户行为模拟完成")

        except Exception as e:
            logger.warning(f"用户行为模拟失败: {e}")

    def _wait_for_page_load(self, driver, url: str, max_wait_time: int = 60) -> bool:
        """
        Wait until the page has genuinely finished loading (strategy based
        on scihub-cn): handle Cloudflare challenges, wait for the title, the
        DOM, and (on ScienceDirect) article-specific elements, then simulate
        user behavior and give client-side JavaScript time to render.

        Args:
            driver: WebDriver instance with the target URL already requested.
            url: Page URL (used for logging and site-specific waits).
            max_wait_time: Kept for interface compatibility; the individual
                waits below use their own timeouts.

        Returns:
            bool: True when the page looks fully loaded, False otherwise.
        """
        try:
            # 1. If a Cloudflare challenge is present, give it time to resolve.
            try:
                WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located((By.ID, "challenge-body-text"))
                )
                logger.info(f"检测到Cloudflare验证，等待处理: {url}")
                # Randomized wait to look less mechanical.
                wait_time = random.randint(25, 35)
                time.sleep(wait_time)
            except TimeoutException:
                # No Cloudflare challenge detected; continue normally.
                pass

            # 2. Bail out if Cloudflare still blocks the page.
            if "Just a moment..." in driver.title or "Cloudflare" in driver.page_source:
                logger.error(f"Cloudflare 拦截失败: {url}")
                return False

            # 3. Wait for the title to stop showing a "please wait" placeholder.
            try:
                WebDriverWait(driver, 45).until(
                    lambda d: d.title and "请稍候" not in d.title and "Please wait" not in d.title and "Just a moment" not in d.title
                )
                logger.info(f"页面标题已加载: {driver.title}")
            except TimeoutException:
                logger.warning(f"页面标题加载超时，直接跳过此条记录: {url}")
                # BUG FIX: the previous version quit the caller-owned driver
                # here and returned None from a bool-typed function. Returning
                # False (still falsy for callers) leaves cleanup to the owner.
                return False

            # 4. Wait for the DOM to finish parsing.
            try:
                WebDriverWait(driver, 30).until(
                    lambda d: d.execute_script("return document.readyState") == "complete"
                )
                logger.info("DOM加载完成")
            except TimeoutException:
                logger.warning("DOM加载超时")

            # 5. Wait for ScienceDirect-specific content elements.
            if "sciencedirect.com" in url:
                for attempt in range(2):
                    try:
                        # Accept any of several selectors that indicate the
                        # article content has rendered.
                        WebDriverWait(driver, 30).until(
                            EC.any_of(
                                EC.presence_of_element_located((By.CLASS_NAME, "author-group")),
                                EC.presence_of_element_located((By.CLASS_NAME, "author")),
                                EC.presence_of_element_located((By.CSS_SELECTOR, "[data-testid='author-group']")),
                                EC.presence_of_element_located((By.CSS_SELECTOR, ".author-name")),
                                EC.presence_of_element_located((By.CSS_SELECTOR, ".article-header")),
                                EC.presence_of_element_located((By.CSS_SELECTOR, ".article-title")),
                                EC.presence_of_element_located((By.CSS_SELECTOR, "h1")),
                                EC.presence_of_element_located((By.CSS_SELECTOR, ".title")),
                            )
                        )
                        logger.info("ScienceDirect内容元素已加载")
                        break
                    except TimeoutException:
                        logger.warning(f"ScienceDirect内容元素加载超时 (尝试 {attempt + 1}/2)")
                        if attempt < 1:
                            # Scroll to trigger lazy loading, then retry.
                            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                            time.sleep(5)
                            driver.execute_script("window.scrollTo(0, 0);")
                            time.sleep(5)

            # 6. Simulate real user behavior.
            self._simulate_human_behavior(driver)

            # 7. Extra wait for client-side JavaScript rendering.
            js_wait_time = random.uniform(15, 30)
            time.sleep(js_wait_time)
            logger.info(f"JavaScript渲染等待完成 ({js_wait_time:.1f}秒)")

            # 8. Re-check the title; wait once more if still on a placeholder.
            final_title = driver.title
            if "请稍候" in final_title or "Please wait" in final_title or "Just a moment" in final_title:
                logger.warning(f"页面可能仍未完全加载，当前标题: {final_title}")
                self._simulate_human_behavior(driver)
                time.sleep(random.uniform(20, 35))
                final_title = driver.title
                logger.info(f"最终页面标题: {final_title}")

            # 9. Final sanity check for access denial.
            if "Access Denied" in driver.page_source or "403" in driver.title:
                logger.error(f"页面访问被拒绝: {url}")
                return False

            return True

        except Exception as e:
            logger.error(f"页面加载等待过程中出错: {e}")
            return False

    def save_html_for_debug(self, html_content: str, url: str, prefix: str = "debug") -> str:
        """
        Write HTML content into an ./html directory for offline inspection.

        Args:
            html_content: Raw page HTML.
            url: Original URL (used to derive a safe file name).
            prefix: File-name prefix.

        Returns:
            Path of the written file, or an empty string on failure.
        """
        try:
            # Ensure the ./html directory next to this module exists.
            html_dir = os.path.join(os.path.dirname(__file__), 'html')
            os.makedirs(html_dir, exist_ok=True)

            # Derive a filesystem-safe name from the URL.
            from urllib.parse import urlparse
            parsed = urlparse(url)
            domain = parsed.netloc.replace('.', '_')
            segments = parsed.path.strip('/').split('/')
            article_id = segments[-1] if segments and segments[-1] else "unknown"

            # Timestamp keeps repeated saves of the same page distinct.
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            filepath = os.path.join(html_dir, f"{prefix}_{domain}_{article_id}_{timestamp}.html")

            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(html_content)

            logger.info(f"HTML页面已保存到: {filepath}")
            return filepath

        except Exception as e:
            logger.error(f"保存HTML文件失败: {e}")
            return ""

    def run(self, url: str) -> str:
        """
        Crawl a single URL and return its page source.

        Args:
            url: Article URL to crawl.

        Returns:
            The page source string, or "" when the crawl failed.
        """
        logger.info(f"开始爬取URL: {url}")
        driver = None
        try:
            # BUG FIX: the previous version used `self.driver_manager`, which
            # is never defined on this class and raised AttributeError on
            # every call. Reuse the retrying Selenium fetch instead — it
            # already waits for the page and simulates user behavior.
            driver = self.fetch_article_html_enhanced(url)
            if driver is None:
                return ""
            page_source = driver.page_source
            logger.info(f"成功获取页面源代码，长度为 {len(page_source)}")
            return page_source
        except Exception as e:
            logger.error(f"爬取过程中发生错误: {e}", exc_info=True)
            return ""
        finally:
            # Always release the browser, even on failure.
            if driver is not None:
                try:
                    driver.quit()
                except Exception:
                    pass