import sqlite3
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import RequestException
import logging
import time
from rich.logging import RichHandler
from rich.traceback import install
from typing import List, Tuple

# Pretty tracebacks with local-variable display for easier debugging.
install(show_locals=True)

# Single rich console handler: colors/markup on, timestamps and paths off.
_console_handler = RichHandler(
    rich_tracebacks=True,
    markup=True,
    show_time=False,
    show_level=True,
    show_path=False,
)

logging.basicConfig(
    level=logging.INFO,
    format="%(message)s",
    handlers=[_console_handler],
)

class Crawler:
    """Breadth-first web crawler that queues URLs and stores pages in SQLite.

    Tables:
        crawler_queue(url UNIQUE, processed) -- frontier of URLs to fetch.
        pages(url UNIQUE, title, content, timestamp) -- fetched page text.
    """

    def __init__(self, db_path: str = 'search_engine.db'):
        # Apply performance pragmas right after opening the connection.
        self.db = sqlite3.connect(db_path)
        self._configure_db()
        self.session = self._create_session()
        self._initialize_tables()

    def close(self):
        """Release the HTTP session and the database connection."""
        self.session.close()
        self.db.close()

    def _configure_db(self):
        """Apply SQLite performance pragmas (WAL, relaxed sync, big cache)."""
        self.db.execute("PRAGMA journal_mode = WAL")
        self.db.execute("PRAGMA synchronous = NORMAL")
        # Negative cache_size is in KiB: -1000000 ~= 1 GB of page cache.
        self.db.execute("PRAGMA cache_size = -1000000")
        self.db.execute("PRAGMA temp_store = MEMORY")

    def _create_session(self) -> requests.Session:
        """Build a requests session with retries, pooling and a bot UA."""
        session = requests.Session()
        adapter_kwargs = dict(max_retries=3, pool_maxsize=20)
        session.mount('https://', HTTPAdapter(**adapter_kwargs))
        session.mount('http://', HTTPAdapter(**adapter_kwargs))
        session.headers.update({
            'User-Agent': 'Mozilla/5.0 (compatible; HySearchBot/1.0)',
            'Accept-Language': 'en-US,en;q=0.9'
        })
        return session

    def _initialize_tables(self):
        """Create the queue and pages tables if they do not exist yet."""
        with self.db:
            self.db.execute('''
                CREATE TABLE IF NOT EXISTS crawler_queue (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    url TEXT UNIQUE NOT NULL,
                    processed INTEGER DEFAULT 0
                )
            ''')
            self.db.execute('''
                CREATE TABLE IF NOT EXISTS pages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    url TEXT UNIQUE NOT NULL,
                    title TEXT,
                    content TEXT,
                    timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
                )
            ''')

    def add_seed_urls(self, urls: List[str]):
        """Bulk-insert URLs into the crawl queue, silently skipping duplicates."""
        if not urls:
            return

        with self.db:
            try:
                # total_changes is cumulative over the connection's lifetime,
                # so diff around the statement to count rows actually inserted
                # by THIS call (the previous code logged the lifetime total).
                before = self.db.total_changes
                self.db.executemany(
                    "INSERT OR IGNORE INTO crawler_queue (url) VALUES (?)",
                    [(url,) for url in urls]
                )
                new_count = self.db.total_changes - before
                if new_count > 0:
                    logging.info(f"🎯 [bold green]批量新增 {new_count} 个URL[/]")
            except sqlite3.Error as e:
                logging.error(f"❌ [red]URL插入失败: {e}[/]")

    def _parse_page(self, soup: BeautifulSoup, url: str) -> Tuple[str, str]:
        """Extract (title, content) from parsed HTML; returns ("", "") on error.

        Content is the joined text of <p>/<article>/<main> elements, capped
        at 50,000 characters to bound row size.
        """
        try:
            title = soup.title.get_text(strip=True) if soup.title else ""
            content = ' '.join([
                p.get_text(strip=True)
                for p in soup.find_all(['p', 'article', 'main'])
            ])[:50000]  # cap content length
        except Exception as e:
            logging.warning(f"⚠️ [yellow]解析异常 {url}: {e}[/]")
            title, content = "", ""
        return title, content

    def crawl(self, max_pages: int = 100, delay: float = 1.0):
        """Fetch queued URLs in batches until max_pages have been attempted.

        Args:
            max_pages: upper bound on the number of URLs attempted.
            delay: minimum seconds between the start of consecutive requests.
        """
        batch_size = 50  # queue rows claimed per iteration
        total_processed = 0

        while total_processed < max_pages:
            # Claim the next batch of unprocessed URLs.
            with self.db:
                cursor = self.db.execute(
                    "SELECT url FROM crawler_queue WHERE processed=0 LIMIT ?",
                    (batch_size,)
                )
                current_batch = [row[0] for row in cursor]

            if not current_batch:
                break  # frontier exhausted

            pages_data = []
            new_links = set()

            for url in current_batch:
                try:
                    start_time = time.monotonic()
                    response = self.session.get(url, timeout=(3.05, 30))
                    response.raise_for_status()

                    # Skip non-HTML responses (images, PDFs, ...).
                    if 'text/html' not in response.headers.get('Content-Type', ''):
                        continue

                    soup = BeautifulSoup(response.text, 'lxml')
                    title, content = self._parse_page(soup, url)
                    pages_data.append((url, title, content))

                    # Queue outbound links. Require a full scheme prefix:
                    # the previous startswith(('http', 'https')) check also
                    # accepted bogus schemes such as "httpfoo://".
                    for link in soup.find_all('a', href=True):
                        absolute_url = urljoin(url, link['href'])
                        if absolute_url.startswith(('http://', 'https://')):
                            new_links.add(absolute_url)

                    # Politeness delay, measured from the request start.
                    elapsed = time.monotonic() - start_time
                    if elapsed < delay:
                        time.sleep(delay - elapsed)

                except RequestException as e:
                    logging.error(f"❌ [red]请求失败 {url}: {e}[/]")
                except Exception as e:
                    logging.error(f"🔥 [bold red]处理异常 {url}: {e}[/]", exc_info=True)

            # Persist the batch in one transaction.
            with self.db:
                try:
                    if pages_data:
                        self.db.executemany(
                            "INSERT OR IGNORE INTO pages (url, title, content) VALUES (?, ?, ?)",
                            pages_data
                        )

                    # Mark EVERY attempted URL as processed -- including
                    # failures and non-HTML skips. Previously only successes
                    # were marked, so failing URLs were re-fetched in every
                    # subsequent batch until max_pages was exhausted.
                    self.db.executemany(
                        "UPDATE crawler_queue SET processed=1 WHERE url=?",
                        [(u,) for u in current_batch]
                    )

                    total_processed += len(current_batch)
                    logging.info(f"✅ [green]已处理批次 {total_processed}/{max_pages}[/]")

                except sqlite3.Error as e:
                    logging.error(f"💾 [red]数据库写入失败: {e}[/]")

            # Enqueue discovered links outside the storage transaction;
            # add_seed_urls manages its own transaction.
            if new_links:
                self.add_seed_urls(list(new_links))
def main() -> None:
    """Entry point: seed the queue and run a bounded crawl."""
    crawler = Crawler()

    seed_urls = [
        # original seed URL list
    ]

    crawler.add_seed_urls(seed_urls)
    crawler.crawl(max_pages=640, delay=0.5)


if __name__ == "__main__":
    main()
