import requests
import time
import random
from utils.logger import setup_logger
from config.settings import MAX_RETRIES, REQUEST_TIMEOUT

# Module-level logger, configured via the project's shared logging helper.
logger = setup_logger(__name__)

class WebCrawler:
    """Polite web crawler: GETs pages with browser-like headers and retries."""

    @staticmethod
    def get_headers():
        """Return browser-like request headers (reduces naive bot blocking)."""
        return {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive'
        }

    @classmethod
    def fetch_webpage(cls, url, max_retries=MAX_RETRIES, *,
                      pre_delay=(2, 5), retry_delay=(5, 10)):
        """Fetch the text content of a web page, with a retry mechanism.

        Args:
            url: URL to fetch.
            max_retries: Maximum number of attempts before giving up.
            pre_delay: (min, max) seconds of random politeness delay applied
                before every request, including the first.
            retry_delay: (min, max) seconds of random back-off slept between
                failed attempts.

        Returns:
            The decoded response body on success, or ``None`` once every
            attempt has failed (also ``None`` when ``max_retries <= 0``).
        """
        for attempt in range(max_retries):
            try:
                # Politeness delay before every request so repeated calls
                # don't hammer the target server.
                time.sleep(random.uniform(*pre_delay))
                response = requests.get(
                    url,
                    headers=cls.get_headers(),
                    timeout=REQUEST_TIMEOUT,
                )
                response.raise_for_status()
                # apparent_encoding sniffs the body bytes; more reliable than
                # an absent/incorrect Content-Type charset for CJK pages.
                response.encoding = response.apparent_encoding
                return response.text
            except requests.exceptions.RequestException as e:
                # Covers timeouts, connection errors, and the HTTPError
                # raised by raise_for_status() — all are retried.
                logger.error(f"第 {attempt + 1} 次尝试失败: {e}")
                if attempt == max_retries - 1:
                    logger.error(f"已达到最大重试次数 ({max_retries})")
                    return None
                logger.info("等待后重试...")
                time.sleep(random.uniform(*retry_delay))
        return None  # reached only when max_retries <= 0