"""
爬虫工厂类
统一管理不同类型的爬虫，支持扩展新的数据源
"""
from typing import Dict, List, Optional, Any
from abc import ABC, abstractmethod
from loguru import logger

from db.base import SessionLocal
from db.models.data_collect import DataSource
from .collect_utils import get_total_pages
from .emergency_spider import EmergencySpider
from .collect_utils import extract_articles, safe_request, LIST_URL, BASE_URL


class BaseSpider(ABC):
    """Abstract base class for all spiders.

    Holds the common source configuration and provides shared helpers for
    date parsing and time-range filtering. Concrete spiders implement
    ``get_latest_articles`` and ``crawl_articles``.
    """

    # Supported time-range codes mapped to relativedelta keyword arguments.
    # Unknown codes (including None) mean "no restriction".
    _TIME_RANGES: Dict[str, Dict[str, int]] = {
        '1d': {'days': 1},
        '3d': {'days': 3},
        '1w': {'weeks': 1},
        '1m': {'months': 1},
    }

    def __init__(self, source_config: Dict[str, Any]):
        self.source_config = source_config
        self.source_id = source_config.get('source_id')
        self.name = source_config.get('name')
        self.base_url = source_config.get('base_url')
        self.category = source_config.get('category')

    @abstractmethod
    def get_latest_articles(self, limit: int = 20) -> List[Dict]:
        """Return up to ``limit`` of the newest article dicts."""

    @abstractmethod
    def crawl_articles(self, time_range: Optional[str] = None) -> List[Dict]:
        """Crawl article details, optionally restricted to ``time_range``."""

    def parse_date(self, date_str: str) -> Optional[Any]:
        """Parse a date string, returning None when it cannot be parsed.

        Chinese date formats (e.g. ``2025年07月25日``) are normalized to
        ISO form (``2025-07-25``) before delegating to ``dateutil``; the
        regex simply does not match when the Chinese markers are absent.
        """
        try:
            import re
            from dateutil.parser import parse as date_parse

            match = re.search(r'(\d{4})年(\d{1,2})月(\d{1,2})日', date_str)
            if match:
                year, month, day = match.groups()
                date_str = f"{year}-{month.zfill(2)}-{day.zfill(2)}"
            return date_parse(date_str)
        except Exception:
            # Unparseable dates are treated as "unknown" rather than fatal.
            return None

    def in_time_range(self, article_date, time_range: Optional[str]) -> bool:
        """Return True if ``article_date`` falls within ``time_range``.

        Articles with a missing/unparsed date are always excluded; an
        unknown ``time_range`` code imposes no restriction. Dates are
        compared against naive local time (``datetime.now()``).
        """
        import datetime
        from dateutil.relativedelta import relativedelta

        if not article_date:
            return False

        delta_kwargs = self._TIME_RANGES.get(time_range)
        if delta_kwargs is None:
            return True
        return article_date >= datetime.datetime.now() - relativedelta(**delta_kwargs)


class MotGovSpider(BaseSpider):
    """Spider for the Ministry of Transport site (original crawl logic)."""

    def get_latest_articles(self, limit: int = 20) -> List[Dict]:
        """Fetch the listing page and return up to ``limit`` articles."""
        html = safe_request(LIST_URL)
        return extract_articles(html)[:limit] if html else []

    def crawl_articles(self, time_range: str = None) -> List[Dict]:
        """Crawl every listing page and collect article entries.

        Listings are assumed newest-first, so the crawl stops at the first
        article that falls outside ``time_range``.
        """
        first_page = safe_request(LIST_URL)
        if not first_page:
            return []

        collected: List[Dict] = []
        for page_index in range(get_total_pages(first_page)):
            if page_index:
                page_html = safe_request(f"{BASE_URL}list_{page_index}.html")
                if not page_html:
                    continue
            else:
                # Page 0 is the listing page we already fetched.
                page_html = first_page

            for item in extract_articles(page_html):
                parsed = self.parse_date(item['date'])
                if time_range and not self.in_time_range(parsed, time_range):
                    # Out of range: everything after this is older, stop here.
                    return collected
                item['publish_time'] = parsed
                collected.append(item)

        return collected


class EmergencySpiderWrapper(BaseSpider):
    """Adapter exposing EmergencySpider through the BaseSpider interface."""

    def __init__(self, source_config: Dict[str, Any]):
        super().__init__(source_config)
        self.spider = EmergencySpider()
        self.spider_config = source_config.get('spider_config', {})

    def get_latest_articles(self, limit: int = 20) -> List[Dict]:
        """Return the newest articles for the configured category."""
        return self.spider.get_latest_articles(
            category=self.spider_config.get('category'),
            limit=limit,
        )

    def crawl_articles(self, time_range: str = None) -> List[Dict]:
        """Crawl the configured category, filter by time, and fetch details."""
        category = self.spider_config.get('category')
        category_name = self.spider_config.get('category_name')

        # Fetch the category listing page.
        listing_html = self.spider.get_page_content(f"{self.spider.base_url}{category}/")
        if not listing_html:
            return []

        articles = self.spider.parse_list_page(listing_html, category_name)

        # Keep only articles whose parsed date falls inside the window.
        if time_range:
            in_range = []
            for item in articles:
                when = self.parse_date(item['date'])
                if self.in_time_range(when, time_range):
                    item['publish_time'] = when
                    in_range.append(item)
            articles = in_range

        # Fetch full content; a failed detail fetch skips just that article.
        detailed = []
        for item in articles:
            try:
                body, files = self.spider.parse_detail_page(item['url'])
            except Exception as e:
                logger.error(f"获取文章详情失败 {item['url']}: {e}")
                continue
            item['content'] = body
            item['attachments'] = files
            detailed.append(item)

        return detailed


class SpiderFactory:
    """Factory that builds spider instances from DataSource records.

    New spider types can be plugged in at runtime via
    ``register_spider_type`` without modifying this module.
    """

    # Registry mapping spider_type identifiers to BaseSpider subclasses.
    _spider_types: Dict[str, type] = {
        'mot_gov': MotGovSpider,
        'emergency': EmergencySpiderWrapper,
    }

    @classmethod
    def create_spider(cls, source_id: str) -> Optional[BaseSpider]:
        """Look up ``source_id`` in the database and build its spider.

        Returns None (and logs the reason) when the source is missing,
        its spider type is unknown, or construction fails.
        """
        db = SessionLocal()
        try:
            source = db.query(DataSource).filter(DataSource.source_id == source_id).first()
            if not source:
                logger.error(f"未找到数据源: {source_id}")
                return None

            # Guard against a NULL spider_config column: previously this
            # raised AttributeError and was mislogged as a generic failure.
            spider_config = source.spider_config or {}
            spider_type = spider_config.get('spider_type')
            spider_class = cls._spider_types.get(spider_type)
            if spider_class is None:
                logger.error(f"不支持的爬虫类型: {spider_type}")
                return None

            # Pass the normalized dict so wrappers can safely call .get on it.
            return spider_class({
                'source_id': source.source_id,
                'name': source.name,
                'base_url': source.base_url,
                'category': source.category,
                'spider_config': spider_config,
            })

        except Exception as e:
            logger.error(f"创建爬虫失败: {e}")
            return None
        finally:
            db.close()

    @classmethod
    def get_available_spiders(cls) -> List[Dict]:
        """Return a summary dict for every active data source."""
        db = SessionLocal()
        try:
            # SQLAlchemy needs the explicit `== True` comparison to build SQL.
            sources = db.query(DataSource).filter(DataSource.is_active == True).all()  # noqa: E712
            return [
                {
                    'source_id': source.source_id,
                    'name': source.name,
                    'description': source.description,
                    'base_url': source.base_url,
                    'category': source.category,
                    # Tolerate a NULL spider_config column.
                    'spider_type': (source.spider_config or {}).get('spider_type'),
                }
                for source in sources
            ]
        finally:
            db.close()

    @classmethod
    def register_spider_type(cls, spider_type: str, spider_class: type) -> None:
        """Register (or override) a spider class for ``spider_type``."""
        cls._spider_types[spider_type] = spider_class
