#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
TryExponent.com 网站规模嗅探工具

这个脚本用于分析tryexponent.com网站的规模和结构，
只收集URL而不下载内容，以便评估完整爬取所需的资源。
"""

import os
import json
import logging
import sqlite3
import time
from datetime import datetime
from urllib.parse import urljoin, urlparse
from collections import defaultdict, Counter

try:
    from bs4 import BeautifulSoup
    import requests
    from fake_useragent import UserAgent
    from tqdm import tqdm
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from webdriver_manager.chrome import ChromeDriverManager
except ImportError as e:
    print(f"导入错误: {e}")
    print("请先安装所需依赖: pip install -r requirements.txt")
    exit(1)

# Configure logging: mirror every INFO+ message both to analyzer.log
# and to the console (stderr) via a stream handler.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("analyzer.log"),
        logging.StreamHandler()
    ]
)

# Module-level logger shared by the analyzer class and the CLI entry point.
logger = logging.getLogger("SiteAnalyzer")


class SiteAnalyzer:
    """Site-scale analyzer.

    Discovers a site's URLs breadth-first using a SQLite-backed queue and
    a headless Chrome browser, recording only metadata (type, category,
    depth, estimated size) so that the resources needed for a full crawl
    can be estimated without downloading page assets.
    """

    def __init__(self, base_url="https://www.tryexponent.com", db_path="./site_analysis.db"):
        """Initialize the analyzer.

        Args:
            base_url: Root URL of the site to analyze.
            db_path: Path of the SQLite database used to persist URL state.
        """
        self.base_url = base_url
        self.db_path = db_path
        self.user_agent = UserAgent()

        # Create the SQLite tables before anything can enqueue URLs.
        self._init_database()

        # Headless Chrome so JavaScript-rendered links are also discovered.
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument(f'user-agent={self.user_agent.random}')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')

        self.browser = webdriver.Chrome(
            service=webdriver.chrome.service.Service(ChromeDriverManager().install()),
            options=chrome_options
        )

        # URLs fully processed during this session (in-memory shortcut;
        # the durable record lives in the database).
        self.visited_urls = set()

        # Running counters kept in memory for quick reporting.
        self.stats = {
            "total_urls": 0,
            "url_by_type": defaultdict(int),
            "url_by_category": defaultdict(int),
            "url_by_depth": defaultdict(int),
            "resource_count": defaultdict(int),
            "estimated_size": 0
        }

        logger.info("网站分析器初始化完成")

    def _init_database(self):
        """Open the SQLite database and create the two tables if missing.

        Raises:
            sqlite3.Error: If the database cannot be opened or initialized.
        """
        try:
            self.conn = sqlite3.connect(self.db_path)
            self.cursor = self.conn.cursor()

            # Catalogue of every discovered URL and its metadata.
            self.cursor.execute("""
            CREATE TABLE IF NOT EXISTS urls (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                url TEXT UNIQUE,
                type TEXT,
                category TEXT,
                depth INTEGER,
                discovered_time TIMESTAMP,
                visited BOOLEAN DEFAULT 0,
                visit_time TIMESTAMP,
                parent_url TEXT,
                estimated_size INTEGER DEFAULT 0
            )
            """)

            # Work queue driving the breadth-first crawl.
            self.cursor.execute("""
            CREATE TABLE IF NOT EXISTS url_queue (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                url TEXT UNIQUE,
                priority INTEGER DEFAULT 0,
                status TEXT DEFAULT 'pending',
                retry_count INTEGER DEFAULT 0,
                last_attempt TIMESTAMP
            )
            """)

            self.conn.commit()
            logger.info("数据库初始化完成")
        except sqlite3.Error as e:
            logger.error(f"数据库初始化失败: {e}")
            raise

    def add_url(self, url, parent_url=None, depth=0, priority=0):
        """Add a URL to the catalogue and the crawl queue.

        Args:
            url: URL to enqueue; relative URLs are resolved against base_url.
            parent_url: URL on which this one was discovered.
            depth: Link depth from the start URL.
            priority: Queue priority; larger values are dequeued first.
        """
        if not url.startswith("http"):
            url = urljoin(self.base_url, url)

        # Normalize: keep scheme/host/path (+ query), drop any fragment.
        parsed = urlparse(url)
        url = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"
        if parsed.query:
            url += f"?{parsed.query}"

        try:
            # NOTE(review): binding datetime objects relies on sqlite3's
            # default adapter (deprecated since Python 3.12) — confirm the
            # target runtime before upgrading.
            now = datetime.now()

            self.cursor.execute(
                """INSERT OR IGNORE INTO urls 
                   (url, depth, discovered_time, parent_url) 
                   VALUES (?, ?, ?, ?)""",
                (url, depth, now, parent_url)
            )
            # rowcount is 0 when the URL already existed (OR IGNORE), so
            # the counters below only reflect genuinely new discoveries.
            # (Previously every duplicate sighting inflated the stats.)
            is_new = self.cursor.rowcount > 0

            self.cursor.execute(
                "INSERT OR IGNORE INTO url_queue (url, priority, status) VALUES (?, ?, ?)",
                (url, priority, "pending")
            )

            self.conn.commit()
            if is_new:
                self.stats["total_urls"] += 1
                self.stats["url_by_depth"][depth] += 1
        except sqlite3.Error as e:
            logger.error(f"添加URL到队列失败: {e}")

    def get_next_url(self):
        """Pop the next pending URL from the queue.

        Returns:
            str | None: The highest-priority pending URL (marked
            'processing' as a side effect), or None if the queue is empty
            or a database error occurred.
        """
        try:
            self.cursor.execute(
                """SELECT url FROM url_queue 
                   WHERE status = 'pending' 
                   ORDER BY priority DESC, id ASC LIMIT 1"""
            )
            result = self.cursor.fetchone()
            if result:
                url = result[0]
                # Claim the entry so it is not handed out twice.
                self.cursor.execute(
                    "UPDATE url_queue SET status = 'processing', last_attempt = ? WHERE url = ?",
                    (datetime.now(), url)
                )
                self.conn.commit()
                return url
            return None
        except sqlite3.Error as e:
            logger.error(f"获取下一个URL失败: {e}")
            return None

    def mark_url_done(self, url, status="done", resource_type=None, category=None, estimated_size=0):
        """Record a URL as processed in both tables and update counters.

        Args:
            url: URL to mark.
            status: Final queue status (e.g. 'done' or 'error').
            resource_type: Detected resource type (e.g. 'html'), if known.
            category: Detected page category, if known.
            estimated_size: Estimated page weight in bytes.
        """
        try:
            # Retire the queue entry.
            self.cursor.execute(
                "UPDATE url_queue SET status = ? WHERE url = ?",
                (status, url)
            )

            # Record visit metadata on the catalogue row.
            self.cursor.execute(
                """UPDATE urls SET 
                   visited = 1, 
                   visit_time = ?,
                   type = ?,
                   category = ?,
                   estimated_size = ?
                   WHERE url = ?""",
                (datetime.now(), resource_type, category, estimated_size, url)
            )

            self.conn.commit()

            # Keep the in-memory counters in step with the database.
            if resource_type:
                self.stats["url_by_type"][resource_type] += 1
            if category:
                self.stats["url_by_category"][category] += 1
            self.stats["estimated_size"] += estimated_size
        except sqlite3.Error as e:
            logger.error(f"标记URL状态失败: {e}")

    def extract_links(self, html, base_url, current_depth):
        """Extract same-site links from HTML and enqueue them.

        Args:
            html: HTML content of the page.
            base_url: URL of the page, used to resolve relative links.
            current_depth: Depth of the page; children get depth + 1.

        Returns:
            list: The same-site page URLs found in anchor tags.
        """
        soup = BeautifulSoup(html, 'lxml')
        links = []

        # Anchor tags: these become new crawl-queue entries.
        for a_tag in soup.find_all('a', href=True):
            href = a_tag['href']
            full_url = urljoin(base_url, href)

            # Only follow URLs on the same host as base_url.
            if urlparse(full_url).netloc == urlparse(self.base_url).netloc:
                links.append(full_url)
                self.add_url(full_url, parent_url=base_url, depth=current_depth+1)

        # Asset tags (images, CSS, JS): counted only, never crawled.
        for tag in soup.find_all(['img', 'script', 'link']):
            src = tag.get('src') or tag.get('href')
            if src:
                full_url = urljoin(base_url, src)
                if urlparse(full_url).netloc == urlparse(self.base_url).netloc:
                    resource_type = self._get_resource_type(full_url)
                    self.stats["resource_count"][resource_type] += 1

        return links

    def _get_resource_type(self, url):
        """Classify a URL by its path extension.

        Args:
            url: URL to classify.

        Returns:
            str: One of 'pdf', 'image', 'video', 'css', 'javascript',
            'font', or 'other'.
        """
        path = urlparse(url).path.lower()

        if path.endswith('.pdf'):
            return "pdf"
        elif path.endswith(('.jpg', '.jpeg', '.png', '.gif', '.svg')):
            return "image"
        elif path.endswith(('.mp4', '.webm', '.avi')):
            return "video"
        elif path.endswith('.css'):
            return "css"
        elif path.endswith('.js'):
            return "javascript"
        elif path.endswith(('.woff', '.woff2', '.ttf', '.eot')):
            return "font"
        else:
            return "other"

    def _detect_category(self, url, html_content):
        """Detect a page's category.

        Args:
            url: Page URL.
            html_content: HTML content of the page.

        Returns:
            str: The first URL path segment when present, else the second
            breadcrumb entry, else 'general'.
        """
        # Prefer the first path segment as the category.
        path = urlparse(url).path
        parts = [p for p in path.split('/') if p]

        if len(parts) > 0:
            return parts[0]

        # No usable path segment: fall back to the page's breadcrumb trail.
        soup = BeautifulSoup(html_content, 'lxml')

        breadcrumbs = soup.select('.breadcrumb, .breadcrumbs, nav[aria-label="breadcrumb"]')
        if breadcrumbs:
            crumbs = breadcrumbs[0].get_text(strip=True, separator='|').split('|')
            if len(crumbs) > 1:
                # Skip the leading "Home"-style crumb.
                return crumbs[1].lower().replace(' ', '_')

        # Default category when nothing else matched.
        return "general"

    def _estimate_page_size(self, html_content):
        """Estimate a page's total weight, including linked assets.

        Asset sizes are rough per-type averages (image 100KB, CSS 50KB,
        JS 100KB) multiplied by how many of each the page references.

        Args:
            html_content: HTML content of the page.

        Returns:
            int: Estimated total size in bytes.
        """
        # Exact size of the HTML itself.
        html_size = len(html_content.encode('utf-8'))

        soup = BeautifulSoup(html_content, 'lxml')

        # Images: assume 100KB each on average.
        img_count = len(soup.find_all('img'))
        img_size = img_count * 100 * 1024

        # Stylesheets: assume 50KB each on average.
        css_count = len(soup.find_all('link', rel='stylesheet'))
        css_size = css_count * 50 * 1024

        # External scripts: assume 100KB each on average.
        js_count = len(soup.find_all('script', src=True))
        js_size = js_count * 100 * 1024

        # Fonts are not easily countable from HTML alone; ignored (0).
        font_size = 0

        total_size = html_size + img_size + css_size + js_size + font_size
        return total_size

    def analyze(self, max_urls=1000, delay=1.0):
        """Run the crawl loop until the queue empties or max_urls is hit.

        Args:
            max_urls: Maximum number of URLs to process.
            delay: Politeness delay between requests, in seconds.
        """
        logger.info(f"开始分析网站规模，最大URL数: {max_urls}")

        # Seed the queue with the start URL at the highest priority.
        self.add_url(self.base_url, priority=10)

        urls_analyzed = 0
        start_time = time.time()

        with tqdm(total=max_urls, desc="分析进度") as pbar:
            while urls_analyzed < max_urls:
                url = self.get_next_url()
                if not url:
                    logger.info("队列为空，分析完成")
                    break

                if url in self.visited_urls:
                    # Already processed this session: retire only the queue
                    # entry. (Calling mark_url_done() with defaults here
                    # would clobber the stored type/category with NULL and
                    # the estimated size with 0.)
                    self.cursor.execute(
                        "UPDATE url_queue SET status = ? WHERE url = ?",
                        ("done", url)
                    )
                    self.conn.commit()
                    continue

                try:
                    # Render the page so client-side links are present.
                    self.browser.get(url)
                    WebDriverWait(self.browser, 10).until(
                        EC.presence_of_element_located((By.TAG_NAME, "body"))
                    )
                    html = self.browser.page_source

                    # Depth of the current URL, as recorded on discovery.
                    self.cursor.execute("SELECT depth FROM urls WHERE url = ?", (url,))
                    result = self.cursor.fetchone()
                    current_depth = result[0] if result else 0

                    # Classify the page.
                    resource_type = "html"
                    category = self._detect_category(url, html)

                    # Estimate the page weight including linked assets.
                    estimated_size = self._estimate_page_size(html)

                    # Enqueue outbound links one level deeper.
                    self.extract_links(html, url, current_depth)

                    self.mark_url_done(
                        url, 
                        resource_type=resource_type, 
                        category=category,
                        estimated_size=estimated_size
                    )

                    self.visited_urls.add(url)
                    urls_analyzed += 1
                    pbar.update(1)

                    # Politeness delay between requests.
                    time.sleep(delay)

                except Exception as e:
                    logger.error(f"分析失败: {url}, 错误: {e}")
                    self.mark_url_done(url, status="error")

        elapsed_time = time.time() - start_time
        logger.info(f"分析完成，共分析 {urls_analyzed} 个URL，耗时 {elapsed_time:.2f} 秒")

        # Summarize everything collected so far.
        self._generate_report()

    def _generate_report(self):
        """Aggregate database statistics and write site_analysis_report.json."""
        try:
            # URL count per depth level.
            self.cursor.execute(
                "SELECT depth, COUNT(*) FROM urls GROUP BY depth ORDER BY depth"
            )
            depth_distribution = dict(self.cursor.fetchall())

            # URL count per resource type.
            self.cursor.execute(
                "SELECT type, COUNT(*) FROM urls WHERE type IS NOT NULL GROUP BY type"
            )
            type_distribution = dict(self.cursor.fetchall())

            # URL count per category.
            self.cursor.execute(
                "SELECT category, COUNT(*) FROM urls WHERE category IS NOT NULL GROUP BY category"
            )
            category_distribution = dict(self.cursor.fetchall())

            # Total estimated size across all visited URLs.
            self.cursor.execute("SELECT SUM(estimated_size) FROM urls")
            total_size = self.cursor.fetchone()[0] or 0

            report = {
                "total_urls": len(self.visited_urls),
                "total_discovered_urls": self.stats["total_urls"],
                "depth_distribution": depth_distribution,
                "type_distribution": type_distribution,
                "category_distribution": category_distribution,
                "resource_count": dict(self.stats["resource_count"]),
                "estimated_total_size": total_size,
                "estimated_total_size_human": self._format_size(total_size)
            }

            # Persist the full report alongside the script.
            with open("site_analysis_report.json", "w", encoding="utf-8") as f:
                json.dump(report, f, indent=2, ensure_ascii=False)

            # Log a short summary for the operator.
            logger.info("网站规模分析报告:")
            logger.info(f"总URL数: {report['total_urls']}")
            logger.info(f"已发现URL数: {report['total_discovered_urls']}")
            logger.info(f"最大深度: {max(depth_distribution.keys()) if depth_distribution else 0}")
            logger.info(f"估计总大小: {report['estimated_total_size_human']}")
            logger.info("详细报告已保存到 site_analysis_report.json")

        except sqlite3.Error as e:
            logger.error(f"生成报告失败: {e}")

    def _format_size(self, size_bytes):
        """Return size_bytes as a human-readable string, e.g. '3.00 MB'.

        Args:
            size_bytes: Size in bytes.

        Returns:
            str: Size scaled to the largest unit below 1024 (capped at TB).
        """
        for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
            if size_bytes < 1024.0 or unit == 'TB':
                break
            size_bytes /= 1024.0
        return f"{size_bytes:.2f} {unit}"

    def close(self):
        """Release the database connection and the browser."""
        if hasattr(self, 'conn') and self.conn:
            self.conn.close()
        if hasattr(self, 'browser') and self.browser:
            self.browser.quit()
        logger.info("分析器已关闭")


def main():
    """CLI entry point: parse arguments, run the analysis, clean up.

    Always closes the analyzer (database connection and browser) on exit,
    including on Ctrl-C or unexpected errors.
    """
    import argparse

    parser = argparse.ArgumentParser(description='TryExponent.com 网站规模分析工具')
    parser.add_argument('-u', '--url', default='https://www.tryexponent.com', help='网站基础URL')
    parser.add_argument('-m', '--max-urls', type=int, default=1000, help='最大分析URL数量')
    parser.add_argument('-d', '--delay', type=float, default=1.0, help='请求延迟（秒）')
    parser.add_argument('--db', default='./site_analysis.db', help='数据库路径')

    args = parser.parse_args()

    # Explicit sentinel instead of probing locals() in the finally block.
    analyzer = None
    try:
        analyzer = SiteAnalyzer(base_url=args.url, db_path=args.db)
        analyzer.analyze(max_urls=args.max_urls, delay=args.delay)
    except KeyboardInterrupt:
        logger.info("用户中断分析")
    except Exception as e:
        logger.error(f"分析过程中发生错误: {e}")
    finally:
        if analyzer is not None:
            analyzer.close()

# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()