"""增强版基础爬虫类
提供更强大的抽象和功能
"""
import asyncio
import hashlib
import json
import logging
import time
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Optional, Callable
from datetime import datetime
from collections import defaultdict

import aiohttp
from aiohttp import ClientSession, ClientTimeout

from .base_spider import BaseSpider
from ..utils.config_manager import config_manager
from ..utils.anti_crawler import anti_crawler_strategy, weibo_cookie_manager

logger = logging.getLogger(__name__)


class RateLimiter:
    """Per-domain sliding-window rate limiter (requests per minute)."""

    def __init__(self, max_requests_per_minute: int = 30):
        """
        Args:
            max_requests_per_minute: Maximum requests allowed per domain
                within any 60-second window.
        """
        self.max_requests_per_minute = max_requests_per_minute
        # domain -> list of request timestamps (seconds since epoch)
        self.requests: Dict[str, List[float]] = defaultdict(list)

    async def acquire(self, domain: str) -> None:
        """Block until a request to ``domain`` is allowed, then record it.

        Args:
            domain: Domain name used as the rate-limit bucket key.
        """
        # Loop instead of recursing: the previous recursive version
        # recorded the request twice after waiting (once in the recursive
        # call and once in the outer frame), silently halving the real limit.
        while True:
            current_time = time.time()
            minute_ago = current_time - 60

            # Drop timestamps that fell out of the 60-second window.
            self.requests[domain] = [
                req_time for req_time in self.requests[domain]
                if req_time > minute_ago
            ]

            if len(self.requests[domain]) < self.max_requests_per_minute:
                break

            # Wait until the oldest recorded request leaves the window,
            # then re-check (other coroutines may have slipped in).
            oldest_request = min(self.requests[domain])
            wait_time = 60 - (current_time - oldest_request) + 0.1
            if wait_time > 0:
                logger.debug(f"Rate limit reached for {domain}, waiting {wait_time:.2f}s")
                await asyncio.sleep(wait_time)

        # Record this request exactly once, with a fresh timestamp
        # (the pre-sleep time would under-count the window after waiting).
        self.requests[domain].append(time.time())


class DataNormalizer:
    """Normalizes raw crawl records into a common cross-platform schema."""

    @staticmethod
    def normalize(data: Dict[str, Any], platform: str) -> Dict[str, Any]:
        """Convert a raw record into the normalized format.

        Args:
            data: Raw record as parsed from the page/API.
            platform: Platform identifier.

        Returns:
            Normalized record containing platform metadata, the mapped
            common fields, and the untouched original under ``raw_data``.
        """
        # Field mapping comes from configuration; 'common_fields' is a
        # list of {field_name: description} dicts.
        mapping = config_manager.get_field_mapping()

        result = {
            'platform': platform,
            'crawled_at': datetime.now().isoformat(),
            'data_id': DataNormalizer.generate_id(data, platform),
        }

        # Copy over every configured common field present in the record.
        for entry in mapping.get('common_fields', []):
            if not isinstance(entry, dict):
                continue
            for field_name in entry:
                if field_name in data:
                    result[field_name] = data[field_name]

        # Always keep the original payload for downstream consumers.
        result['raw_data'] = data
        return result

    @staticmethod
    def generate_id(data: Dict[str, Any], platform: str) -> str:
        """Derive a per-day stable ID from the platform and title.

        Args:
            data: Record whose ``title`` seeds the ID (missing -> '').
            platform: Platform identifier.

        Returns:
            32-character hexadecimal MD5 digest.
        """
        key = f"{platform}_{data.get('title', '')}_{datetime.now().strftime('%Y%m%d')}"
        return hashlib.md5(key.encode()).hexdigest()


class EnhancedBaseSpider(BaseSpider):
    """Enhanced base spider class.

    Adds per-domain rate limiting, anti-crawler headers/cookies,
    pluggable error handlers and request middlewares, response
    decoding fallbacks, and crawl statistics on top of BaseSpider.
    """

    def __init__(self, platform: str, name: str = None):
        """Initialize the spider.

        Args:
            platform: Platform identifier (configuration key).
            name: Spider name; defaults to "<Platform>Spider".
        """
        super().__init__(name=name or f"{platform.title()}Spider")

        self.platform = platform
        self.config = config_manager.get_platform_config(platform)
        self.rate_limiter = RateLimiter(self.config.get('rate_limit_per_minute', 30))
        self.data_normalizer = DataNormalizer()

        # Retry / timeout settings from platform config.
        self.retry_times = self.config.get('retry_times', 3)
        self.retry_delay = self.config.get('retry_delay', 2)
        self.timeout = ClientTimeout(total=self.config.get('timeout', 30))

        # Base request headers for this platform.
        self.headers = config_manager.get_headers(platform)

        # Error handling: exception type -> async handler.
        self.error_handlers: Dict[type, Callable] = {}
        self.register_default_error_handlers()

        # Request middlewares: async callables (url, kwargs) -> kwargs.
        self.middlewares: List[Callable] = []

        # Crawl statistics. NOTE: 'failed' counts failed *attempts*, so
        # success + failed may exceed requests when retries happen.
        self.stats = {
            'requests': 0,
            'success': 0,
            'failed': 0,
            'errors': defaultdict(int)
        }

    def register_default_error_handlers(self):
        """Register the built-in error handlers."""
        self.register_error_handler(
            aiohttp.ClientError,
            self._handle_client_error
        )
        self.register_error_handler(
            asyncio.TimeoutError,
            self._handle_timeout_error
        )
        self.register_error_handler(
            json.JSONDecodeError,
            self._handle_json_error
        )

    def register_error_handler(self, error_type: type, handler: Callable):
        """Register an error handler.

        Args:
            error_type: Exception class to handle.
            handler: Async handler, invoked as ``handler(error, url, **kwargs)``.
                A non-None return value is treated as a recovered response.
        """
        self.error_handlers[error_type] = handler

    def add_middleware(self, middleware: Callable):
        """Add a request middleware.

        Args:
            middleware: Async callable ``(url, kwargs) -> kwargs`` that may
                inspect or rewrite the request kwargs (headers included).
        """
        self.middlewares.append(middleware)

    async def _handle_client_error(self, error: aiohttp.ClientError, url: str, **kwargs):
        """Handle aiohttp client errors.

        On connection errors, retries once through a proxy if one is
        available and no proxy was already in use.

        Args:
            error: The raised exception.
            url: Request URL.
            **kwargs: Original request kwargs (headers included).

        Returns:
            The retried response text, or None if unrecoverable.
        """
        self.stats['errors']['client_error'] += 1
        logger.warning(f"Client error for {url}: {error}")

        if isinstance(error, aiohttp.ClientConnectorError):
            proxy = anti_crawler_strategy.get_proxy()
            if proxy and 'proxy' not in kwargs:
                kwargs['proxy'] = proxy
                logger.info(f"Retrying with proxy: {proxy}")
                return await self.fetch(url, **kwargs)

        return None

    async def _handle_timeout_error(self, error: asyncio.TimeoutError, url: str, **kwargs):
        """Handle request timeouts: count and log, leave retrying to fetch().

        Args:
            error: The raised exception.
            url: Request URL.
            **kwargs: Original request kwargs (ignored).

        Returns:
            None (no recovery attempted here).
        """
        self.stats['errors']['timeout'] += 1
        logger.warning(f"Timeout for {url}")
        return None

    async def _handle_json_error(self, error: json.JSONDecodeError, response_text: str = '', **kwargs):
        """Handle JSON parse failures: count and log a response snippet.

        The signature accepts extra keyword arguments so the generic
        dispatch in fetch() (``handler(e, url, **kwargs)``) cannot raise a
        TypeError; the original two-argument form broke that contract.

        Args:
            error: The raised exception.
            response_text: Text that failed to parse (fetch_json passes the
                body; the generic dispatch passes the URL here).
            **kwargs: Ignored request kwargs from the generic dispatch.

        Returns:
            None (no recovery attempted).
        """
        self.stats['errors']['json_error'] += 1
        logger.error(f"JSON decode error: {error}")
        logger.debug(f"Response text: {response_text[:500]}...")
        return None

    async def fetch(self, url: str, method: str = 'GET', **kwargs) -> Optional[str]:
        """Fetch a URL with rate limiting, anti-crawl headers, retries and
        encoding fallbacks.

        Args:
            url: Target URL.
            method: HTTP method.
            **kwargs: Extra aiohttp request arguments; an optional
                'headers' dict is merged over the platform defaults.

        Returns:
            Decoded response body, or None if every attempt failed.
        """
        # Per-domain rate limiting.
        from urllib.parse import urlparse
        domain = urlparse(url).netloc
        await self.rate_limiter.acquire(domain)

        # Keep per-request headers OUTSIDE kwargs so every retry attempt
        # sees them: the previous implementation popped 'headers' from
        # kwargs inside the retry loop, so the random User-Agent/Cookie
        # were silently dropped from attempt 2 onwards.
        extra_headers: Dict[str, str] = dict(kwargs.pop('headers', None) or {})

        # Anti-crawler strategy: random delay + random User-Agent.
        if self.config.get('use_anti_crawler', True):
            await anti_crawler_strategy.apply_delay(1, 3)
            extra_headers['User-Agent'] = anti_crawler_strategy.get_random_user_agent()

        # Cookie handling (weibo has a dedicated rotating cookie pool).
        if self.config.get('requires_cookie', False):
            if self.platform == 'weibo':
                cookie = weibo_cookie_manager.get_next_cookie()
            else:
                cookie = anti_crawler_strategy.get_cookie(self.platform)

            if cookie:
                extra_headers['Cookie'] = cookie

        # Middlewares may rewrite kwargs, headers included; re-extract
        # the headers afterwards so the retry loop keeps owning them.
        if extra_headers:
            kwargs['headers'] = extra_headers
        for middleware in self.middlewares:
            kwargs = await middleware(url, kwargs)
        extra_headers = dict(kwargs.pop('headers', None) or extra_headers)

        self.stats['requests'] += 1

        for attempt in range(self.retry_times):
            try:
                if not self.session:
                    await self.start()

                # Merge per-request headers over the platform defaults.
                request_headers = {**self.headers, **extra_headers}

                async with self.session.request(
                    method,
                    url,
                    headers=request_headers,
                    **kwargs
                ) as response:
                    response.raise_for_status()

                    content = await response.read()
                    text = self._decode_body(content, response.charset)

                    self.stats['success'] += 1
                    return text

            except Exception as e:
                self.stats['failed'] += 1

                # Dispatch to the first matching handler; a non-None result
                # is treated as a recovered response.  Headers are passed
                # along so handler-driven re-fetches keep them.
                for error_type, handler in self.error_handlers.items():
                    if isinstance(e, error_type):
                        result = await handler(
                            e, url, headers=dict(extra_headers), **kwargs
                        )
                        if result is not None:
                            return result
                        break

                if attempt < self.retry_times - 1:
                    # Linear backoff: delay grows with the attempt number.
                    delay = self.retry_delay * (attempt + 1)
                    logger.debug(f"Retry {attempt + 1}/{self.retry_times} after {delay}s")
                    await asyncio.sleep(delay)
                else:
                    logger.error(f"All retries failed for {url}: {e}")

        return None

    @staticmethod
    def _decode_body(content: bytes, charset: Optional[str]) -> str:
        """Decode a response body, falling back through common Chinese
        encodings and finally to lossy UTF-8.

        Also catches LookupError so an invalid server-reported charset
        falls through to the fallbacks instead of burning a retry.

        Args:
            content: Raw response bytes.
            charset: Charset reported by the response, if any.

        Returns:
            Decoded text (never raises).
        """
        try:
            return content.decode(charset or 'utf-8')
        except (UnicodeDecodeError, LookupError):
            for enc in ['gbk', 'gb2312', 'gb18030', 'utf-8', 'latin-1']:
                try:
                    return content.decode(enc)
                except UnicodeDecodeError:
                    continue
            return content.decode('utf-8', errors='ignore')

    async def fetch_json(self, url: str, **kwargs) -> Optional[Dict[str, Any]]:
        """Fetch a URL and parse the body as JSON.

        Args:
            url: Target URL.
            **kwargs: Request arguments forwarded to fetch().

        Returns:
            Parsed JSON data, or None on fetch/parse failure.
        """
        response = await self.fetch(url, **kwargs)
        if not response:
            return None

        try:
            return json.loads(response)
        except json.JSONDecodeError as e:
            await self._handle_json_error(e, response)
            return None

    async def fetch_multiple(self, urls: List[str], **kwargs) -> List[Optional[str]]:
        """Fetch several URLs concurrently, bounded by a semaphore.

        Args:
            urls: URLs to fetch.
            **kwargs: Request arguments forwarded to every fetch().

        Returns:
            Response bodies in the same order as ``urls`` (None for failures).
        """
        max_concurrent = self.config.get('max_concurrent_requests', 5)
        semaphore = asyncio.Semaphore(max_concurrent)

        async def fetch_with_semaphore(url: str):
            async with semaphore:
                return await self.fetch(url, **kwargs)

        tasks = [fetch_with_semaphore(url) for url in urls]
        return await asyncio.gather(*tasks)

    async def parse_and_normalize(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """Parse a page via the subclass parser and normalize each record.

        Args:
            html: Page content.
            url: Page URL (passed through to parse()).

        Returns:
            List of normalized records.
        """
        # Delegate extraction to the subclass implementation.
        raw_data = await self.parse(html, url)

        return [
            self.data_normalizer.normalize(item, self.platform)
            for item in raw_data
        ]

    async def crawl(self, url: str = None, **kwargs) -> List[Dict[str, Any]]:
        """Run the full crawl pipeline: fetch, parse, normalize, store.

        Args:
            url: Target URL; falls back to the platform's configured
                'hot_search' then 'hot_list' API URL when omitted.
            **kwargs: Request arguments forwarded to fetch().

        Returns:
            The crawled (normalized) records; empty list on failure.
        """
        if not url:
            url = config_manager.get_api_url(self.platform, 'hot_search')
            if not url:
                url = config_manager.get_api_url(self.platform, 'hot_list')

        if not url:
            logger.error(f"No URL provided for {self.platform}")
            return []

        logger.info(f"Crawling {self.platform} from {url}")

        html = await self.fetch(url, **kwargs)
        if not html:
            logger.error(f"Failed to fetch {url}")
            return []

        data = await self.parse_and_normalize(html, url)

        # Persist only when something was actually extracted.
        if data:
            await self.store(data)

        logger.info(
            f"Crawl stats for {self.platform}: "
            f"Requests: {self.stats['requests']}, "
            f"Success: {self.stats['success']}, "
            f"Failed: {self.stats['failed']}"
        )

        return data

    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot of the crawl statistics.

        Returns:
            Shallow copy of the stats dict; note the nested 'errors'
            counter is still shared with the live spider.
        """
        return self.stats.copy()

    @abstractmethod
    async def parse(self, html: str, url: str = None) -> List[Dict[str, Any]]:
        """Parse page content into raw records (subclasses must implement).

        Args:
            html: Page content.
            url: Page URL.

        Returns:
            List of raw record dicts.
        """
        pass