import hashlib
import logging
import os
import re
import time
from collections import deque
from typing import Dict, List, Set
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup

class DeepCrawler:
    """
    Breadth-first crawler that follows WeChat (weixin) public-account
    article links up to a configurable depth and can persist the fetched
    HTML to disk.
    """

    def __init__(self, max_depth: int = 2, delay: float = 1.0):
        """
        Initialize the crawler.

        Args:
            max_depth: Maximum crawl depth (0 = fetch only the start URL).
            delay: Pause between HTTP requests, in seconds.
        """
        self.max_depth = max_depth
        self.delay = delay
        self.visited_urls: Set[str] = set()
        self.weixin_urls: Set[str] = set()
        # Present a desktop-browser User-Agent; some hosts reject the
        # default python-requests UA.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        self.logger = logging.getLogger(__name__)

    def is_weixin_url(self, url: str) -> bool:
        """
        Return True if the URL looks like a WeChat (weixin) link.

        NOTE: this is a plain case-insensitive substring test, so any URL
        containing "weixin" anywhere matches.

        Args:
            url: URL to check.

        Returns:
            bool: Whether the URL is treated as a WeChat link.
        """
        return 'weixin' in url.lower()

    def get_page_content(self, url: str) -> str:
        """
        Fetch a page and return its HTML.

        Args:
            url: Target URL.

        Returns:
            str: The page body, or "" if the request failed (errors are
            logged, never raised to the caller).
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=10)
            response.raise_for_status()
            return response.text
        # Narrowed from bare Exception: requests.get / raise_for_status
        # raise subclasses of RequestException on failure.
        except requests.RequestException as e:
            self.logger.error(f"Error fetching {url}: {str(e)}")
            return ""

    def extract_links(self, html: str, base_url: str) -> Set[str]:
        """
        Extract all WeChat links from an HTML document.

        Args:
            html: HTML content to parse.
            base_url: URL used to resolve relative hrefs.

        Returns:
            Set[str]: Absolute URLs that pass is_weixin_url().
        """
        soup = BeautifulSoup(html, 'html.parser')
        links: Set[str] = set()

        for a_tag in soup.find_all('a', href=True):
            absolute_url = urljoin(base_url, a_tag['href'])
            if self.is_weixin_url(absolute_url):
                links.add(absolute_url)

        return links

    def crawl(self, start_url: str) -> Dict[str, str]:
        """
        Crawl breadth-first from start_url up to self.max_depth.

        Args:
            start_url: URL to start from.

        Returns:
            Dict[str, str]: Mapping of each successfully fetched URL to
            its HTML content.
        """
        results: Dict[str, str] = {}
        # deque gives O(1) popleft; list.pop(0) is O(n) per dequeue.
        queue = deque([(start_url, 0)])  # (url, depth)
        # Track everything ever enqueued so the same URL is not queued
        # many times before it gets visited.
        queued: Set[str] = {start_url}

        while queue:
            current_url, depth = queue.popleft()

            if current_url in self.visited_urls or depth > self.max_depth:
                continue

            self.visited_urls.add(current_url)
            self.logger.info(f"Crawling {current_url} at depth {depth}")

            html_content = self.get_page_content(current_url)
            if html_content:
                results[current_url] = html_content

                if depth < self.max_depth:
                    for link in self.extract_links(html_content, current_url):
                        if link not in self.visited_urls and link not in queued:
                            queued.add(link)
                            queue.append((link, depth + 1))

            # Politeness delay between requests.
            time.sleep(self.delay)

        return results

    def save_html_content(self, results: Dict[str, str], output_dir: str) -> List[str]:
        """
        Save each page's HTML to output_dir and return the file paths.

        Filenames are derived from an MD5 digest of the URL, so the same
        URL always maps to the same file across runs. (The previous
        built-in hash() is randomized per process via PYTHONHASHSEED and
        produced different filenames on every run.)

        Args:
            results: Mapping of URL -> HTML content.
            output_dir: Directory to write files into (created if absent).

        Returns:
            List[str]: Paths of the files written.
        """
        # Create the directory once, not once per URL.
        os.makedirs(output_dir, exist_ok=True)
        saved_files: List[str] = []

        for url, content in results.items():
            # Stable, filesystem-safe name derived from the URL.
            filename = f"{hashlib.md5(url.encode('utf-8')).hexdigest()}.html"
            filepath = os.path.join(output_dir, filename)

            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(content)
            saved_files.append(filepath)

        return saved_files