#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
TryExponent.com 资源爬虫

这个脚本实现了一个基本的爬虫，用于爬取tryexponent.com网站上的资源
并将其保存到本地目录中。使用firecrawl作为主要爬虫框架，结合crawl4ai进行结构化数据提取。
"""

import hashlib
import json
import logging
import os
import random
import sqlite3
import time
from datetime import datetime
from urllib.parse import urljoin, urlparse, unquote

try:
    from bs4 import BeautifulSoup
    import requests
    from fake_useragent import UserAgent
    from tqdm import tqdm
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from webdriver_manager.chrome import ChromeDriverManager
except ImportError as e:
    print(f"导入错误: {e}")
    print("请先安装所需依赖: pip install -r requirements.txt")
    exit(1)

# Logging configuration: mirror all crawler events to both a log file and stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("crawler.log"),
        logging.StreamHandler()
    ]
)

# Module-level logger shared by the crawler class and the CLI entry point.
logger = logging.getLogger("ExponentCrawler")


class ExponentCrawler:
    """Crawler for tryexponent.com: pages, images, CSS/JS and fonts, with SQLite bookkeeping."""

    def __init__(self, output_dir="./output", db_path=None):
        """Set up output directories, the metadata database, and a headless browser.

        Args:
            output_dir: directory where crawled content is written.
            db_path: SQLite database file; defaults to <output_dir>/index.db.
        """
        self.base_url = "https://www.tryexponent.com"
        self.output_dir = output_dir
        self.db_path = db_path or os.path.join(output_dir, "index.db")
        self.user_agent = UserAgent()

        # Create the full output directory tree up front.
        subdirs = (
            (),
            ("articles",),
            ("resources", "pdf"),
            ("resources", "images"),
            ("resources", "css"),
            ("resources", "js"),
            ("resources", "fonts"),
        )
        for parts in subdirs:
            os.makedirs(os.path.join(output_dir, *parts), exist_ok=True)

        # Create (or open) the SQLite metadata store.
        self._init_database()

        # Launch a headless Chrome instance driven by Selenium.
        opts = Options()
        for flag in ('--headless', f'user-agent={self.user_agent.random}',
                     '--disable-gpu', '--no-sandbox', '--disable-dev-shm-usage'):
            opts.add_argument(flag)

        self.browser = webdriver.Chrome(
            service=webdriver.chrome.service.Service(ChromeDriverManager().install()),
            options=opts
        )

        # URLs already fetched in this (or a resumed) session.
        self.visited_urls = set()

        logger.info("爬虫初始化完成")

    def _init_database(self):
        """Create the SQLite connection and ensure both bookkeeping tables exist.

        Raises:
            sqlite3.Error: re-raised after logging when setup fails.
        """
        schema = (
            # Metadata for every downloaded resource.
            """
            CREATE TABLE IF NOT EXISTS resources (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                url TEXT UNIQUE,
                title TEXT,
                type TEXT,
                category TEXT,
                local_path TEXT,
                crawl_time TIMESTAMP,
                last_modified TIMESTAMP,
                metadata TEXT
            )
            """,
            # Crawl frontier with priority and retry bookkeeping.
            """
            CREATE TABLE IF NOT EXISTS url_queue (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                url TEXT UNIQUE,
                priority INTEGER DEFAULT 0,
                status TEXT DEFAULT 'pending',  -- 状态可以是: pending, processing, done, error, skipped
                retry_count INTEGER DEFAULT 0,
                last_attempt TIMESTAMP
            )
            """,
        )
        try:
            self.conn = sqlite3.connect(self.db_path)
            self.cursor = self.conn.cursor()
            for statement in schema:
                self.cursor.execute(statement)
            self.conn.commit()
            logger.info("数据库初始化完成")
        except sqlite3.Error as e:
            logger.error(f"数据库初始化失败: {e}")
            raise

    def add_url(self, url, priority=0):
        """添加URL到爬取队列

        Args:
            url: 要爬取的URL
            priority: 优先级，数字越大优先级越高
        """
        if not url.startswith("http"):
            url = urljoin(self.base_url, url)

        # 根据URL路径设置优先级
        path = urlparse(url).path.lower()

        # 设置优先级
        if '/courses/' in path:
            priority = max(priority, 100)  # 课程页面最高优先级
        elif '/questions/' in path:
            priority = max(priority, 90)   # 问题页面次高优先级
        elif '/blog/' in path:
            priority = max(priority, 80)   # 博客页面优先级
        elif '/guides/' in path:
            priority = max(priority, 70)   # 指南页面优先级
        elif '/coaching/' in path:
            priority = max(priority, 60)   # 教练页面优先级
        elif '/practice/' in path:
            priority = max(priority, 50)   # 练习页面优先级
        elif path.endswith(('.css', '.js', '.png', '.jpg', '.jpeg', '.gif', '.svg', '.woff', '.woff2', '.ttf')):
            priority = max(priority, 30)   # 资源文件优先级

        try:
            self.cursor.execute(
                "INSERT OR IGNORE INTO url_queue (url, priority, status) VALUES (?, ?, ?)",
                (url, priority, "pending")
            )
            self.conn.commit()
        except sqlite3.Error as e:
            logger.error(f"添加URL到队列失败: {e}")

    def get_next_url(self):
        """从队列中获取下一个要爬取的URL"""
        try:
            self.cursor.execute(
                """SELECT url FROM url_queue
                   WHERE status = 'pending'
                   ORDER BY priority DESC, id ASC LIMIT 1"""
            )
            result = self.cursor.fetchone()
            if result:
                url = result[0]
                self.cursor.execute(
                    "UPDATE url_queue SET status = 'processing', last_attempt = ? WHERE url = ?",
                    (datetime.now(), url)
                )
                self.conn.commit()
                return url
            return None
        except sqlite3.Error as e:
            logger.error(f"获取下一个URL失败: {e}")
            return None

    def mark_url_done(self, url, status="done"):
        """标记URL为已完成状态"""
        try:
            self.cursor.execute(
                "UPDATE url_queue SET status = ? WHERE url = ?",
                (status, url)
            )
            self.conn.commit()
        except sqlite3.Error as e:
            logger.error(f"标记URL状态失败: {e}")

    def extract_links(self, html, base_url):
        """Collect all same-site anchor targets from an HTML document.

        Args:
            html: raw HTML text.
            base_url: URL the document was fetched from (for resolving relatives).

        Returns:
            list: absolute URLs whose host matches self.base_url's host.
        """
        site_host = urlparse(self.base_url).netloc
        soup = BeautifulSoup(html, 'lxml')
        # Resolve every href against the page URL, keeping on-site links only.
        candidates = (urljoin(base_url, a['href'])
                      for a in soup.find_all('a', href=True))
        return [link for link in candidates if urlparse(link).netloc == site_host]

    def detect_resource_type(self, url, content_type=None):
        """Classify a resource by URL extension, then by Content-Type header.

        Fixes the original `path.endswith(('.css'))` / `('.js')` calls, where
        the parentheses did not form tuples (they were plain strings); behavior
        was coincidentally correct but the code was misleading.

        Args:
            url: resource URL.
            content_type: optional Content-Type from the HTTP response.

        Returns:
            str: one of "pdf", "image", "video", "css", "js", "font", or
            "article" (the default for anything unrecognized, e.g. HTML pages).
        """
        path = urlparse(url).path.lower()

        # First pass: the file extension is the most reliable signal.
        ext_rules = (
            (('.pdf',), "pdf"),
            (('.jpg', '.jpeg', '.png', '.gif', '.svg', '.webp', '.ico'), "image"),
            (('.mp4', '.webm', '.avi', '.mov'), "video"),
            (('.css',), "css"),
            (('.js',), "js"),
            (('.woff', '.woff2', '.ttf', '.eot', '.otf'), "font"),
        )
        for extensions, rtype in ext_rules:
            if path.endswith(extensions):
                return rtype

        # Second pass: fall back to the Content-Type header when available.
        if content_type:
            ct_rules = (
                ('application/pdf', "pdf"),
                ('image/', "image"),
                ('video/', "video"),
                ('text/css', "css"),
                ('application/javascript', "js"),
                ('text/javascript', "js"),
                ('font/', "font"),
            )
            for marker, rtype in ct_rules:
                if marker in content_type:
                    return rtype

        # Everything else is treated as an HTML article page.
        return "article"

    def download_resource(self, url, resource_type, html_content=None):
        """Download one resource to disk and record its metadata in the database.

        Bug fixes vs. the original:
        - Articles whose filename lacked ".html" were all written to the single
          literal file "(unknown).html", overwriting each other; now the real
          filename gets the ".html" suffix appended.
        - Synthesized filenames used hash(), which is salted per process, so
          names changed between runs; now a stable md5 digest is used.
        - Images were saved under "resources/image" while __init__ creates
          "resources/images"; the two now agree.

        Args:
            url: resource URL.
            resource_type: type from detect_resource_type().
            html_content: pre-fetched page HTML (required for "article").

        Returns:
            tuple: (success flag, local path, metadata dict);
            (False, None, None) on failure or unknown type.
        """
        try:
            parsed_url = urlparse(url)
            filename = os.path.basename(parsed_url.path)
            if not filename or '.' not in filename:
                # No usable file name in the URL: synthesize a stable one.
                url_digest = hashlib.md5(url.encode('utf-8')).hexdigest()[:16]
                default_ext = {
                    "article": ".html",
                    "image": ".jpg",
                    "css": ".css",
                    "js": ".js",
                    "font": ".woff",
                }.get(resource_type, "")
                filename = f"{parsed_url.netloc.replace('.', '_')}_{url_digest}{default_ext}"

            if resource_type == "article" and html_content:
                category = self._detect_category(url, html_content)
                save_dir = os.path.join(self.output_dir, "articles", category)
                os.makedirs(save_dir, exist_ok=True)

                # Ensure the saved page carries an .html suffix.
                if not filename.endswith('.html'):
                    local_path = os.path.join(save_dir, f"{filename}.html")
                else:
                    local_path = os.path.join(save_dir, filename)

                soup = BeautifulSoup(html_content, 'lxml')
                title = soup.title.string if soup.title else filename

                # Save the raw HTML as fetched.
                with open(local_path, 'w', encoding='utf-8') as f:
                    f.write(html_content)

                # Lightweight structured extraction for later indexing.
                structured_data = {
                    "text": soup.get_text(strip=True),
                    "links": [a.get('href') for a in soup.find_all('a', href=True)],
                    "images": [img.get('src') for img in soup.find_all('img', src=True)],
                    "css": [link.get('href') for link in soup.find_all('link', rel="stylesheet")],
                    "js": [script.get('src') for script in soup.find_all('script', src=True)]
                }

                metadata = {
                    "title": title,
                    "extracted_data": structured_data,
                    "url": url
                }

                # Queue the page's own assets (images/CSS/JS/fonts) for download.
                self._download_page_resources(url, soup)

            elif resource_type in ["pdf", "image", "video", "css", "js", "font"]:
                # Map the type onto its storage subdirectory (matches __init__).
                subdir = {"js": "js", "css": "css", "font": "fonts",
                          "image": "images"}.get(resource_type, resource_type)
                save_dir = os.path.join(self.output_dir, "resources", subdir)
                os.makedirs(save_dir, exist_ok=True)
                local_path = os.path.join(save_dir, filename)

                headers = {
                    'User-Agent': self.user_agent.random,
                    'Referer': self.base_url
                }

                response = requests.get(url, headers=headers, stream=True, timeout=30)
                response.raise_for_status()

                # Stream to disk with a progress bar.
                total_size = int(response.headers.get('content-length', 0))
                with open(local_path, 'wb') as f:
                    with tqdm(total=total_size, unit='B', unit_scale=True, desc=filename) as pbar:
                        for chunk in response.iter_content(chunk_size=8192):
                            if chunk:
                                f.write(chunk)
                                pbar.update(len(chunk))

                metadata = {"headers": dict(response.headers)}
            else:
                logger.warning(f"未知资源类型: {resource_type}, URL: {url}")
                return False, None, None

            # Persist the crawl record (upsert keyed on URL).
            self._save_resource_metadata(
                url=url,
                title=metadata.get("title", filename),
                resource_type=resource_type,
                category=category if resource_type == "article" else "",
                local_path=local_path,
                metadata=json.dumps(metadata)
            )

            logger.info(f"下载完成: {url} -> {local_path}")
            return True, local_path, metadata

        except Exception as e:
            logger.error(f"下载资源失败: {url}, 错误: {e}")
            return False, None, None

    def _download_page_resources(self, base_url, soup):
        """Queue a page's embedded assets (images, CSS, JS, fonts) for crawling.

        Args:
            base_url: URL of the page the assets were found on.
            soup: parsed BeautifulSoup document for that page.
        """
        site_host = urlparse(self.base_url).netloc

        def enqueue(raw_ref):
            asset_url = urljoin(base_url, raw_ref)
            # Stay on-site and skip anything already fetched.
            if urlparse(asset_url).netloc == site_host and asset_url not in self.visited_urls:
                self.add_url(asset_url)

        # Images.
        for img in soup.find_all('img', src=True):
            enqueue(img['src'])

        # Stylesheets.
        for link in soup.find_all('link', rel="stylesheet", href=True):
            enqueue(link['href'])

        # Scripts.
        for script in soup.find_all('script', src=True):
            enqueue(script['src'])

        # Fonts are usually referenced from CSS; only direct <link rel="font">
        # references are handled here.
        for link in soup.find_all('link', rel="font", href=True):
            enqueue(link['href'])
    def _detect_category(self, url, html_content):
        """Infer an article's category from its URL path or breadcrumb trail.

        Args:
            url: article URL.
            html_content: page HTML (consulted only when the URL path is empty).

        Returns:
            str: category slug, or "general" when nothing can be inferred.
        """
        # Prefer the first non-empty URL path segment (e.g. /blog/... -> "blog").
        segments = [seg for seg in urlparse(url).path.split('/') if seg]
        if segments:
            return segments[0]

        # Otherwise look for a breadcrumb trail inside the page.
        soup = BeautifulSoup(html_content, 'lxml')
        breadcrumbs = soup.select('.breadcrumb, .breadcrumbs, nav[aria-label="breadcrumb"]')
        if breadcrumbs:
            crumbs = breadcrumbs[0].get_text(strip=True, separator='|').split('|')
            if len(crumbs) > 1:
                # Second crumb is typically the section name.
                return crumbs[1].lower().replace(' ', '_')

        # Fallback bucket.
        return "general"

    def _save_resource_metadata(self, url, title, resource_type, category, local_path, metadata):
        """保存资源元数据到数据库"""
        try:
            now = datetime.now()
            self.cursor.execute(
                """INSERT OR REPLACE INTO resources
                   (url, title, type, category, local_path, crawl_time, last_modified, metadata)
                   VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
                (url, title, resource_type, category, local_path, now, now, metadata)
            )
            self.conn.commit()
        except sqlite3.Error as e:
            logger.error(f"保存资源元数据失败: {e}")

    def check_resource_updated(self, url, resource_type):
        """检查资源是否需要更新

        Args:
            url: 资源URL
            resource_type: 资源类型

        Returns:
            bool: 是否需要更新
        """
        try:
            # 查询数据库中的资源记录
            self.cursor.execute(
                "SELECT crawl_time, metadata FROM resources WHERE url = ?",
                (url,)
            )
            result = self.cursor.fetchone()

            # 如果没有记录，需要爬取
            if not result:
                return True

            # 如果是文章类型，总是检查更新
            if resource_type == "article":
                return True

            # 对于其他资源，发送HEAD请求检查是否更新
            crawl_time, metadata_json = result
            metadata = json.loads(metadata_json) if metadata_json else {}

            headers = {
                'User-Agent': self.user_agent.random,
                'If-Modified-Since': metadata.get('headers', {}).get('Last-Modified', ''),
                'If-None-Match': metadata.get('headers', {}).get('ETag', '')
            }

            response = requests.head(url, headers=headers, timeout=10)

            # 如果返回304，表示资源未修改
            if response.status_code == 304:
                logger.info(f"资源未修改: {url}")
                return False

            # 如果返回200，检查Last-Modified和ETag
            if response.status_code == 200:
                # 如果没有Last-Modified和ETag，则需要更新
                if 'Last-Modified' not in response.headers and 'ETag' not in response.headers:
                    return True

                # 如果有Last-Modified，比较时间
                if 'Last-Modified' in response.headers and 'Last-Modified' in metadata.get('headers', {}):
                    old_time = metadata['headers']['Last-Modified']
                    new_time = response.headers['Last-Modified']
                    if old_time != new_time:
                        return True

                # 如果有ETag，比较值
                if 'ETag' in response.headers and 'ETag' in metadata.get('headers', {}):
                    old_etag = metadata['headers']['ETag']
                    new_etag = response.headers['ETag']
                    if old_etag != new_etag:
                        return True

            # 默认不更新
            return False

        except Exception as e:
            logger.error(f"检查资源更新失败: {url}, 错误: {e}")
            # 出错时默认更新
            return True

    def crawl(self, max_pages=30000, incremental=False, resume=False, max_retries=3, timeout=300, browser_reset_interval=100):
        """Run the crawl loop until the queue is empty or max_pages is reached.

        Bug fix vs. the original: the "updated pages" counter checked
        `url in self.visited_urls` AFTER the url had just been added to that
        set, so it incremented for every page in incremental mode. Membership
        is now captured in `was_visited` before any mutation.

        Args:
            max_pages: hard cap on pages processed in this run.
            incremental: re-visit known URLs, but only re-download changed ones.
            resume: skip URLs crawled in a previous run (mutually exclusive with
                incremental; incremental wins when both are set).
            max_retries: attempts per URL before marking it as an error.
            timeout: page-load / wait timeout in seconds.
            browser_reset_interval: restart the browser after this many pages to
                work around Chrome memory growth.
        """
        def _fresh_browser():
            # Build a new headless Chrome instance (same flags as __init__).
            chrome_options = Options()
            chrome_options.add_argument('--headless')
            chrome_options.add_argument(f'user-agent={self.user_agent.random}')
            chrome_options.add_argument('--disable-gpu')
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--disable-dev-shm-usage')
            return webdriver.Chrome(
                service=webdriver.chrome.service.Service(ChromeDriverManager().install()),
                options=chrome_options
            )

        def _restart_browser():
            # Quit the current browser (best effort) and start a fresh one.
            try:
                self.browser.quit()
            except Exception:
                pass
            self.browser = _fresh_browser()

        # Incremental and resume are mutually exclusive; prefer incremental.
        if incremental and resume:
            logger.warning("增量爬取和续传爬取不能同时使用，将使用增量爬取模式")
            resume = False

        mode = '增量' if incremental else '续传' if resume else '全新'
        logger.info(f"开始{mode}爬取，最大页面数: {max_pages}, 超时时间: {timeout}秒")

        # Seed the queue with the site root.
        self.add_url(self.base_url, priority=10)

        # Both modes need to know what was crawled before.
        if incremental or resume:
            self._load_visited_urls()
            logger.info(f"已加载 {len(self.visited_urls)} 个已访问URL")

        pages_crawled = 0
        pages_updated = 0
        pages_skipped = 0
        pages_failed = 0
        pages_since_reset = 0  # pages crawled since the last browser restart

        while pages_crawled < max_pages:
            url = self.get_next_url()
            if not url:
                logger.info("队列为空，爬取完成")
                break

            # Capture membership BEFORE visited_urls is mutated below.
            was_visited = url in self.visited_urls

            if was_visited:
                if resume:
                    # Resume mode: never re-crawl a known URL.
                    self.mark_url_done(url)
                    pages_skipped += 1
                    continue
                elif not incremental:
                    # Plain mode: silently skip duplicates.
                    self.mark_url_done(url)
                    continue

            resource_type = self.detect_resource_type(url)

            # Incremental mode: only refresh resources that actually changed.
            if incremental and was_visited:
                if not self.check_resource_updated(url, resource_type):
                    self.mark_url_done(url)
                    pages_skipped += 1
                    continue

            logger.info(f"爬取: {url}")

            success = False
            retry_count = 0
            last_error = None

            while not success and retry_count < max_retries:
                try:
                    # Back off with a randomized delay on retries.
                    if retry_count > 0:
                        delay_time = random.uniform(1, 5) * retry_count
                        logger.info(f"第 {retry_count} 次重试 {url}, 延迟 {delay_time:.2f} 秒")
                        time.sleep(delay_time)

                    self.browser.set_page_load_timeout(timeout)

                    # Pages behind authentication are out of scope.
                    if '/login/' in url or 'signin' in url or 'auth' in url or 'account' in url:
                        logger.info(f"跳过需要登录的页面: {url}")
                        self.mark_url_done(url, status="skipped")
                        pages_skipped += 1
                        success = True
                        continue

                    try:
                        # Render the page with Selenium and wait for <body>.
                        self.browser.get(url)
                        WebDriverWait(self.browser, timeout).until(
                            EC.presence_of_element_located((By.TAG_NAME, "body"))
                        )
                        html = self.browser.page_source
                        headers = {'Content-Type': 'text/html'}
                    except Exception as e:
                        # Browser is wedged: replace it and retry once inline.
                        logger.warning(f"浏览器错误: {e}, 尝试重新初始化浏览器")
                        _restart_browser()
                        self.browser.set_page_load_timeout(timeout)
                        self.browser.get(url)
                        WebDriverWait(self.browser, timeout).until(
                            EC.presence_of_element_located((By.TAG_NAME, "body"))
                        )
                        html = self.browser.page_source
                        headers = {'Content-Type': 'text/html'}

                    # Re-classify now that the Content-Type is known.
                    resource_type = self.detect_resource_type(
                        url,
                        content_type=headers.get('Content-Type')
                    )

                    download_success, _, _ = self.download_resource(url, resource_type, html)

                    if download_success:
                        # Feed newly discovered links back into the queue.
                        for link in self.extract_links(html, url):
                            if not incremental or link not in self.visited_urls:
                                self.add_url(link)

                    self.visited_urls.add(url)
                    self.mark_url_done(url)
                    pages_crawled += 1

                    # Count as "updated" only when it existed before this run.
                    if incremental and was_visited:
                        pages_updated += 1

                    logger.info(f"进度: {pages_crawled}/{max_pages} (更新: {pages_updated}, 跳过: {pages_skipped}, 失败: {pages_failed})")

                    # Periodic browser restart to contain memory leaks.
                    pages_since_reset += 1
                    if pages_since_reset >= browser_reset_interval:
                        logger.info(f"已爬取 {pages_since_reset} 个页面，重置浏览器以防止内存泄漏")
                        _restart_browser()
                        pages_since_reset = 0

                    success = True

                except Exception as e:
                    last_error = e
                    retry_count += 1
                    logger.warning(f"爬取失败: {url}, 错误: {e}, 重试 {retry_count}/{max_retries}")

                    # On timeouts, stop any in-flight page load.
                    if "timeout" in str(e).lower():
                        try:
                            self.browser.execute_script("window.stop();")
                        except Exception:
                            pass

            # Exhausted all retries.
            if not success:
                logger.error(f"爬取失败（所有重试均失败）: {url}, 错误: {last_error}")
                self.mark_url_done(url, status="error")
                pages_failed += 1

        logger.info(f"爬取完成，共爬取 {pages_crawled} 个页面 (更新: {pages_updated}, 跳过: {pages_skipped}, 失败: {pages_failed})")
        logger.info("如需查看详细状态报告，请使用 'python3 run.py status' 命令")

    def _load_visited_urls(self):
        """从数据库加载已访问URL"""
        try:
            self.cursor.execute("SELECT url FROM resources")
            for (url,) in self.cursor.fetchall():
                self.visited_urls.add(url)
        except sqlite3.Error as e:
            logger.error(f"加载已访问URL失败: {e}")

    def sniff_status(self):
        """嗅探爬取状态，返回未爬取的URL数量和其他统计信息"""
        try:
            # 获取URL队列状态统计
            self.cursor.execute("""
            SELECT status, COUNT(*) FROM url_queue GROUP BY status
            """)
            status_counts = dict(self.cursor.fetchall())

            # 获取资源类型统计
            self.cursor.execute("""
            SELECT type, COUNT(*) FROM resources GROUP BY type
            """)
            type_counts = dict(self.cursor.fetchall())

            # 获取总URL数
            self.cursor.execute("SELECT COUNT(*) FROM url_queue")
            total_urls = self.cursor.fetchone()[0]

            # 获取已爬取URL数
            self.cursor.execute("SELECT COUNT(*) FROM resources")
            crawled_urls = self.cursor.fetchone()[0]

            # 计算剩余URL数
            pending_urls = status_counts.get('pending', 0)
            processing_urls = status_counts.get('processing', 0)
            done_urls = status_counts.get('done', 0)
            error_urls = status_counts.get('error', 0)
            skipped_urls = status_counts.get('skipped', 0)

            # 计算完成百分比
            completion_percentage = (done_urls / total_urls * 100) if total_urls > 0 else 0

            # 获取最近爬取速度（每小时URL数）
            self.cursor.execute("""
            SELECT COUNT(*) FROM resources
            WHERE crawl_time > datetime('now', '-1 hour')
            """)
            recent_crawl_rate = self.cursor.fetchone()[0]

            # 估算剩余时间（小时）
            estimated_hours_left = (pending_urls / recent_crawl_rate) if recent_crawl_rate > 0 else 0

            # 返回状态报告
            status_report = {
                "total_urls": total_urls,
                "crawled_urls": crawled_urls,
                "pending_urls": pending_urls,
                "processing_urls": processing_urls,
                "done_urls": done_urls,
                "error_urls": error_urls,
                "skipped_urls": skipped_urls,
                "completion_percentage": completion_percentage,
                "recent_crawl_rate": recent_crawl_rate,  # 每小时URL数
                "estimated_hours_left": estimated_hours_left,
                "resource_types": type_counts
            }

            return status_report

        except sqlite3.Error as e:
            logger.error(f"嗅探状态失败: {e}")
            return {
                "error": str(e),
                "pending_urls": 0,
                "completion_percentage": 0
            }

    def print_status_report(self):
        """打印爬取状态报告"""
        status = self.sniff_status()

        print("\n===== 爬虫状态报告 =====")
        print(f"总URL数: {status['total_urls']}")
        print(f"已爬取URL数: {status['crawled_urls']}")
        print(f"待爬取URL数: {status['pending_urls']}")
        print(f"正在处理URL数: {status['processing_urls']}")
        print(f"已完成URL数: {status['done_urls']}")
        print(f"失败URL数: {status['error_urls']}")
        print(f"跳过URL数: {status['skipped_urls']}")
        print(f"完成百分比: {status['completion_percentage']:.2f}%")
        print(f"最近爬取速度: {status['recent_crawl_rate']} URL/小时")

        # 计算剩余时间并转换为更友好的格式
        hours_left = status['estimated_hours_left']
        if hours_left > 24:
            days = int(hours_left / 24)
            remaining_hours = hours_left % 24
            time_str = f"{days} 天 {remaining_hours:.1f} 小时"
        else:
            time_str = f"{hours_left:.2f} 小时"
        print(f"预计剩余时间: {time_str}")

        # 计算预计完成时间
        if hours_left > 0:
            from datetime import datetime, timedelta
            completion_time = datetime.now() + timedelta(hours=hours_left)
            print(f"预计完成时间: {completion_time.strftime('%Y-%m-%d %H:%M:%S')}")

        print("\n资源类型统计:")
        for resource_type, count in status.get('resource_types', {}).items():
            print(f"  {resource_type}: {count}")

        # 添加提示信息
        print("\n提示:")
        print("- 此状态报告不会影响爬取速度，可随时运行查看")
        print("- 如需实时监控爬取进度，可使用定时任务执行此命令")
        print("- 预计时间基于最近一小时的爬取速度，仅供参考")

        print("=======================\n")
        return status

    def close(self):
        """Release the database connection and the Selenium browser."""
        conn = getattr(self, 'conn', None)
        if conn:
            conn.close()
        browser = getattr(self, 'browser', None)
        if browser:
            browser.quit()
        logger.info("爬虫已关闭")


def main():
    """CLI entry point: parse arguments and dispatch to the requested action.

    Supported actions (mutually exclusive by precedence): status report,
    site-size analysis, path fixing / local serving, and the crawl itself.
    """
    import argparse
    import sys

    parser = argparse.ArgumentParser(description='TryExponent.com 资源爬虫')
    parser.add_argument('-o', '--output', default='./output', help='输出目录')
    parser.add_argument('-m', '--max-pages', type=int, default=30000, help='最大爬取页面数')
    parser.add_argument('--db', help='数据库路径')
    parser.add_argument('--delay', type=float, default=0.3, help='请求间隔时间（秒）')
    parser.add_argument('--incremental', action='store_true', help='使用增量爬取，只更新已修改的内容')
    parser.add_argument('--resume', action='store_true', help='使用续传爬取，只爬取未爬取过的内容')
    parser.add_argument('--analyze-only', action='store_true', help='仅分析网站规模，不下载内容')
    parser.add_argument('--fix-paths', action='store_true', help='修复HTML文件中的资源路径')
    parser.add_argument('--serve', action='store_true', help='启动本地服务器')
    parser.add_argument('--port', type=int, default=8000, help='本地服务器端口')
    parser.add_argument('--status', action='store_true', help='显示爬取状态报告')
    parser.add_argument('--browser-reset', type=int, default=100, help='每爬取多少页面后重置浏览器，防止内存泄漏')

    args = parser.parse_args()

    # --status: print a crawl status report and exit.
    if args.status:
        try:
            crawler = ExponentCrawler(output_dir=args.output, db_path=args.db)
            crawler.print_status_report()
        except Exception as e:
            logger.error(f"显示状态报告时发生错误: {e}")
        finally:
            if 'crawler' in locals():
                crawler.close()
        return

    # --analyze-only: estimate the site size without downloading content.
    if args.analyze_only:
        try:
            # Lazy import: site_analyzer is an optional sibling module.
            from site_analyzer import SiteAnalyzer
            analyzer = SiteAnalyzer(db_path=args.db or os.path.join(args.output, "site_analysis.db"))
            analyzer.analyze(max_urls=args.max_pages, delay=args.delay)
        except ImportError:
            logger.error("找不到site_analyzer.py模块，请确保它与本脚本在同一目录下")
            sys.exit(1)
        except KeyboardInterrupt:
            logger.info("用户中断分析")
        except Exception as e:
            logger.error(f"分析过程中发生错误: {e}")
        finally:
            if 'analyzer' in locals():
                analyzer.close()
        return

    # --fix-paths / --serve: post-process crawled output or serve it locally.
    if args.fix_paths or args.serve:
        try:
            # Lazy import: local_server is an optional sibling module.
            from local_server import PathFixer, run_server

            # Rewrite resource paths inside saved HTML files.
            if args.fix_paths:
                logger.info("开始修复HTML文件中的资源路径...")
                fixer = PathFixer(args.output)
                fixer.fix_all_files()
                fixer.close()
                logger.info("路径修复完成")

            # Serve the crawled mirror over HTTP.
            if args.serve:
                logger.info(f"启动本地服务器，端口: {args.port}")
                run_server(args.output, args.port)
        except ImportError:
            logger.error("找不到local_server.py模块，请确保它与本脚本在同一目录下")
            sys.exit(1)
        except Exception as e:
            logger.error(f"运行本地服务器时发生错误: {e}")
        return

    # Default action: run the crawl itself.
    try:
        crawler = ExponentCrawler(output_dir=args.output, db_path=args.db)
        crawler.crawl(
            max_pages=args.max_pages,
            incremental=args.incremental,
            resume=args.resume,
            max_retries=3,  # maximum retry attempts per URL
            timeout=300,    # page-load timeout in seconds
            browser_reset_interval=args.browser_reset  # pages between browser restarts
        )
    except KeyboardInterrupt:
        logger.info("用户中断爬取")
    except Exception as e:
        logger.error(f"爬取过程中发生错误: {e}")
    finally:
        if 'crawler' in locals():
            crawler.close()


if __name__ == "__main__":
    main()