import logging
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

import requests
from bs4 import BeautifulSoup

class BaseCrawler(ABC):
    """
    Abstract base class for crawlers: defines the common interface and
    shared helpers.

    Subclasses implement ``extract_data`` to parse source-specific pages;
    ``crawl`` drives the fetch -> extract -> annotate pipeline.
    """

    def __init__(self, source_config: Dict[str, Any]):
        """
        Initialize the crawler from a data-source configuration.

        Args:
            source_config: Source configuration mapping. Recognized keys:
                ``name``, ``url``, ``enabled``, ``update_frequency``.
        """
        self.name = source_config.get('name', 'unknown')
        self.url = source_config.get('url', '')
        # Crawlers are opt-in: a missing 'enabled' key means the source is skipped.
        self.enabled = source_config.get('enabled', False)
        self.update_frequency = source_config.get('update_frequency', 'daily')
        self.logger = logging.getLogger(f"crawler.{self.name}")
        # Desktop-browser UA: some sources reject the default requests UA.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
        }

    def fetch_page(self, url: Optional[str] = None) -> Optional[BeautifulSoup]:
        """
        Fetch a page and parse it into a document tree.

        Args:
            url: URL to fetch; if None, the configured source URL is used.

        Returns:
            Parsed BeautifulSoup document, or None on any network/HTTP error.
        """
        target_url = url or self.url
        try:
            response = requests.get(target_url, headers=self.headers, timeout=10)
            response.raise_for_status()
        except requests.RequestException as e:
            # Catch only requests' errors (connection, timeout, bad HTTP
            # status); anything else is a programming error and should
            # propagate instead of being silently swallowed.
            self.logger.error("Failed to fetch %s: %s", target_url, e)
            return None
        return BeautifulSoup(response.text, 'html.parser')

    @abstractmethod
    def extract_data(self, soup: BeautifulSoup) -> List[Dict[str, Any]]:
        """
        Extract structured records from a parsed page.

        Args:
            soup: Parsed page document.

        Returns:
            List of extracted records (dicts). Provenance metadata is
            added by ``crawl``, not here.
        """

    def crawl(self) -> List[Dict[str, Any]]:
        """
        Run the full crawl: fetch the page, extract data, stamp metadata.

        Returns:
            Extracted records annotated with source name/URL and a UTC
            crawl timestamp; an empty list if the crawler is disabled or
            any step fails.
        """
        if not self.enabled:
            self.logger.info("Crawler %s is disabled, skipping...", self.name)
            return []

        self.logger.info("Starting crawl for %s from %s", self.name, self.url)
        soup = self.fetch_page()
        # Explicit None check: BeautifulSoup truthiness is content-based, so
        # a successfully fetched but empty document could be falsy.
        if soup is None:
            return []

        try:
            data = self.extract_data(soup)
        except Exception:
            # Boundary catch: a broken parser for one source must not take
            # down the whole run; logger.exception preserves the traceback.
            self.logger.exception("Error extracting data from %s", self.name)
            return []

        # Stamp provenance metadata on every record. Timezone-aware UTC
        # timestamps avoid ambiguity when sources run in different locales.
        for item in data:
            item.update({
                'source': self.name,
                'source_url': self.url,
                'crawled_at': datetime.now(timezone.utc).isoformat(),
            })
        self.logger.info("Successfully crawled %d items from %s", len(data), self.name)
        return data

    def __str__(self) -> str:
        return f"{self.name} Crawler ({self.url})"