import random
import logging
import time
import requests
from bs4 import BeautifulSoup
from typing import Optional, Union
from urllib.parse import urlparse
import yaml

class BaseCrawler:
    """Crawler base class supporting HTML and RSS fetch modes."""

    # Fallback UA so we never send 'User-Agent: None' when the config
    # file does not provide one.
    DEFAULT_USER_AGENT = 'Mozilla/5.0 (compatible; BaseCrawler/1.0)'

    def __init__(self, config_path: str = 'config/config.yaml'):
        """
        :param config_path: path to the YAML configuration file
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        self.config = self._load_config(config_path)
        crawler_cfg = self.config.get('crawler', {})
        self.max_retries = crawler_cfg.get('max_retries', 3)
        # Accept either a single UA string or a list of them; fall back
        # to DEFAULT_USER_AGENT instead of storing [None] (the original
        # behavior, which produced a literal 'User-Agent: None' header).
        configured_ua = crawler_cfg.get('user_agent')
        if configured_ua is None:
            self.user_agents = [self.DEFAULT_USER_AGENT]
        elif isinstance(configured_ua, list):
            self.user_agents = list(configured_ua)
        else:
            self.user_agents = [configured_ua]
        self.request_interval = crawler_cfg.get('request_interval', 5)

    def _load_config(self, config_path: str) -> dict:
        """
        Load the YAML configuration file.

        :param config_path: path to the YAML file
        :return: parsed configuration; an empty file yields {}
        """
        with open(config_path, 'r', encoding='utf-8') as f:
            # safe_load returns None for an empty document; normalize to
            # a dict so the .get() chains in __init__ don't blow up.
            return yaml.safe_load(f) or {}

    def _get_random_user_agent(self) -> str:
        """Return a random User-Agent string from the configured pool."""
        return random.choice(self.user_agents)

    def _make_request(self, url: str, mode: str = 'html') -> Optional[Union["BeautifulSoup", dict]]:
        """
        Issue an HTTP GET with retries and return the parsed content.

        :param url: target URL
        :param mode: fetch mode, 'html' or 'rss'
        :return: BeautifulSoup object ('html') or RSS dict ('rss')
        :raises ValueError: if mode is unsupported (fails fast, no retries)
        :raises requests.RequestException: if all retry attempts fail
        """
        # Validate the mode up front: a bad mode is a programming error,
        # and must not be swallowed and retried by the loop below (the
        # original code retried it max_retries times, sleeping between).
        if mode not in ('html', 'rss'):
            raise ValueError(f"Unsupported mode: {mode}")

        headers = {'User-Agent': self._get_random_user_agent()}

        for attempt in range(self.max_retries):
            try:
                # Strict timeouts: 5s connect, 10s read.
                response = requests.get(
                    url,
                    headers=headers,
                    timeout=(5, 10)
                )
                response.raise_for_status()
            except requests.RequestException as e:
                # Retry only network/HTTP failures — parser errors on a
                # successful response would not benefit from a retry.
                self.logger.warning(f"Attempt {attempt + 1} failed: {str(e)}")
                if attempt == self.max_retries - 1:
                    self.logger.error(f"Failed after {self.max_retries} attempts")
                    raise
                time.sleep(self.request_interval)
            else:
                if mode == 'html':
                    return BeautifulSoup(response.text, 'html.parser')
                return self._parse_rss(response.text)
        # Reachable only if max_retries < 1 (loop body never ran).
        return None

    def _parse_rss(self, rss_content: str) -> dict:
        """
        Parse RSS content into a dict.

        :param rss_content: raw RSS/XML text
        :raises NotImplementedError: always, until implemented
        """
        # TODO: implement RSS parsing
        raise NotImplementedError("RSS parsing not implemented yet")

    def crawl(self, url: str, mode: str = 'html') -> Optional[Union["BeautifulSoup", dict]]:
        """
        Validate/normalize the URL and fetch it.

        :param url: target URL; a bare host such as 'example.com' is
                    accepted and prefixed with 'http://'
        :param mode: fetch mode, 'html' or 'rss'
        :return: parsed content from :meth:`_make_request`
        :raises ValueError: if the URL is empty/invalid or mode unsupported
        """
        parsed = urlparse(url)
        # A scheme-less URL like 'example.com' parses with its host in
        # .path (scheme and netloc both empty), so the original
        # `not scheme and not netloc` check wrongly rejected it before
        # the http:// prefix below could ever apply. Reject only URLs
        # with neither a host nor a path component.
        if not (parsed.netloc or parsed.path):
            self.logger.error(f"Invalid URL: {url}")
            raise ValueError("Invalid URL format")

        if not parsed.scheme:
            url = 'http://' + url

        return self._make_request(url, mode)
