"""基础爬虫类"""
import asyncio
import hashlib
import logging
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Optional
from datetime import datetime
import aiohttp
from aiohttp import ClientSession, ClientTimeout

logger = logging.getLogger(__name__)


class BaseSpider(ABC):
    """Abstract base class for asynchronous spiders.

    Manages an aiohttp ``ClientSession`` lifecycle (usable as an async
    context manager), fetches pages with retry and charset-fallback
    decoding, and runs the fetch -> parse -> store pipeline via
    :meth:`crawl`. Subclasses must implement :meth:`parse`.
    """

    # Encodings tried, in order, when the declared charset fails to decode.
    # NOTE: 'latin-1' can decode any byte sequence, so in practice the
    # errors='ignore' fallback below it is unreachable; kept for safety.
    _FALLBACK_ENCODINGS = ('gbk', 'gb2312', 'gb18030', 'utf-8', 'latin-1')

    def __init__(self, name: str = None):
        """Initialize the spider.

        Args:
            name: Spider name; defaults to the concrete class name.
        """
        self.name = name or self.__class__.__name__
        self.session: Optional[ClientSession] = None
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        }
        self.timeout = ClientTimeout(total=30)
        self.retry_times = 3
        self.retry_delay = 1

    async def __aenter__(self):
        """Async context manager entry: start the HTTP session."""
        await self.start()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: close the HTTP session."""
        await self.close()

    async def start(self):
        """Create the shared ``ClientSession`` if not already open."""
        if not self.session:
            self.session = ClientSession(
                headers=self.headers,
                timeout=self.timeout
            )
        logger.info(f"爬虫 {self.name} 已启动")

    async def close(self):
        """Close the ``ClientSession`` and release its connections."""
        if self.session:
            await self.session.close()
            self.session = None
        logger.info(f"爬虫 {self.name} 已关闭")

    async def fetch(self, url: str, **kwargs) -> Optional[str]:
        """Fetch a page, retrying transient network failures.

        Args:
            url: Target URL.
            **kwargs: Extra arguments forwarded to ``session.get``.

        Returns:
            Decoded page content, or None if every attempt failed.
        """
        for attempt in range(self.retry_times):
            try:
                if not self.session:
                    await self.start()

                async with self.session.get(url, **kwargs) as response:
                    response.raise_for_status()
                    content = await response.read()
                    return self._decode(content, response.charset)

            except aiohttp.ClientError as e:
                logger.warning(f"请求失败 {url}, 尝试 {attempt + 1}/{self.retry_times}: {e}")
                if attempt < self.retry_times - 1:
                    # Linear backoff: delay, 2*delay, ... between attempts.
                    await asyncio.sleep(self.retry_delay * (attempt + 1))
            except Exception as e:
                # Non-network errors are not retried.
                logger.error(f"意外错误 {url}: {e}")
                break

        return None

    @classmethod
    def _decode(cls, content: bytes, charset: Optional[str]) -> str:
        """Decode raw response bytes, falling back through common encodings.

        Catches LookupError in addition to UnicodeDecodeError: a
        server-declared charset that Python does not recognize previously
        escaped the decode fallback and aborted the whole fetch.

        Args:
            content: Raw response body.
            charset: Charset declared by the response, or None.

        Returns:
            Decoded text (lossy ``errors='ignore'`` as a last resort).
        """
        encoding = charset if charset else 'utf-8'
        try:
            return content.decode(encoding)
        except (UnicodeDecodeError, LookupError):
            for enc in cls._FALLBACK_ENCODINGS:
                try:
                    return content.decode(enc)
                except UnicodeDecodeError:
                    continue
            return content.decode('utf-8', errors='ignore')

    @abstractmethod
    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """Parse page content into structured records.

        Args:
            html: HTML content.
            url: Page URL (optional).

        Returns:
            List of parsed data records.
        """
        pass

    async def store(self, data: List[Dict[str, Any]]) -> bool:
        """Store parsed data (subclasses may override).

        Args:
            data: Records to store.

        Returns:
            True if storage succeeded.
        """
        logger.info(f"存储 {len(data)} 条数据")
        return True

    async def crawl(self, url: str, **kwargs) -> List[Dict[str, Any]]:
        """Run the full fetch -> parse -> store pipeline for one URL.

        Args:
            url: Target URL.
            **kwargs: Extra arguments forwarded to :meth:`fetch`.

        Returns:
            Parsed data, or an empty list if the fetch failed.
        """
        html = await self.fetch(url, **kwargs)
        if not html:
            logger.error(f"获取页面失败: {url}")
            return []

        data = await self.parse(html, url)
        if data:
            await self.store(data)

        return data

    @staticmethod
    def generate_hash(text: str) -> str:
        """Generate a hash key for deduplicating text.

        NOTE(review): MD5 appears to be used only as a dedup/content key
        here, not for security — confirm before relying on it elsewhere.

        Args:
            text: Source text.

        Returns:
            Hex MD5 digest of the UTF-8 encoded text.
        """
        return hashlib.md5(text.encode()).hexdigest()