import functools
import logging
import yaml
from typing import Dict, List, Optional, Any
from pathlib import Path
from .request_utils import RequestUtils

class BaseCrawler:
    """Base class for config-driven article crawlers.

    Subclasses implement ``parse_article_list`` and
    ``parse_article_detail``; this class supplies YAML config loading,
    file logging setup, an error-swallowing decorator, and the
    page-by-page crawl loop.
    """

    def __init__(self, config_path: str):
        """Load the YAML config, build the request helper, set up logging.

        Args:
            config_path: Path to a YAML file; must contain at least a
                ``base_url`` key (read by :meth:`crawl`).
        """
        self.config = self._load_config(config_path)
        self.request_utils = RequestUtils()
        self._setup_logging()

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Parse the YAML file at *config_path* and return it as a dict."""
        # Explicit encoding: the default is locale-dependent, which breaks
        # non-ASCII config values on some platforms (e.g. Windows cp1252).
        with open(config_path, encoding="utf-8") as f:
            return yaml.safe_load(f)

    def _setup_logging(self):
        """Route log records to logs/crawler.log, creating the dir if needed."""
        Path("logs").mkdir(exist_ok=True)
        logging.basicConfig(
            filename="logs/crawler.log",
            level=logging.INFO,
            format="%(asctime)s - %(levelname)s - %(message)s"
        )

    # NOTE: plain function used as a decorator inside the class body.
    # It is applied at class-definition time (no ``self``); it is not
    # meant to be called on instances.
    def handle_exceptions(func):
        """Wrap *func* so any exception is logged and ``None`` returned."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # logging.exception keeps the traceback, and lazy %-args
                # defer message formatting until the record is emitted.
                logging.exception("Error in %s: %s", func.__name__, e)
                return None
        return wrapper

    @handle_exceptions
    def parse_article_list(self, html: str) -> List[Dict[str, str]]:
        """Parse a listing page's HTML into article dicts.

        Subclasses implement this; each dict must carry a ``link`` key
        (read by :meth:`crawl_page`). The base stub returns ``None``.
        """
        # Implement article-list parsing in subclasses.
        pass

    @handle_exceptions
    def parse_article_detail(self, html: str) -> Dict[str, str]:
        """Parse one article's detail-page HTML into a result dict.

        Subclasses implement this. The base stub returns ``None``.
        """
        # Implement article-detail parsing in subclasses.
        pass

    def validate_result(self, result: Dict[str, Any]) -> bool:
        """Return True when *result* contains every required field."""
        required_fields = ("title", "content", "publish_time")
        return all(field in result for field in required_fields)

    def crawl_page(self, url: str, page: int = 1) -> Optional[List[Dict[str, Any]]]:
        """Fetch one listing page and every article detail it links to.

        Args:
            url: Listing-page URL; the page number goes in the query string.
            page: 1-based page number passed as the ``page`` query param.

        Returns:
            Validated detail dicts, or ``None`` when the listing request
            or its parsing fails (a signal for :meth:`crawl` to stop).
        """
        response = self.request_utils.request("GET", url, params={"page": page})
        if not response:
            return None

        articles = self.parse_article_list(response.text)
        if not articles:
            return None

        results = []
        for article in articles:
            detail_response = self.request_utils.request("GET", article["link"])
            if not detail_response:
                continue  # skip articles whose detail fetch failed

            detail = self.parse_article_detail(detail_response.text)
            if detail and self.validate_result(detail):
                results.append(detail)

        return results

    def crawl(self, start_page: int = 1, max_pages: int = 10) -> List[Dict[str, Any]]:
        """Crawl pages ``start_page``..``max_pages`` against config['base_url'].

        Stops early at the first page that yields no results, which
        normally means the last real page has been passed.
        """
        results = []
        current_page = start_page

        while current_page <= max_pages:
            page_results = self.crawl_page(self.config["base_url"], current_page)
            if not page_results:
                break  # empty/failed page: assume pagination is exhausted

            results.extend(page_results)
            current_page += 1

        return results
