"""
统一数据源管理器 - 管理和协调所有数据源
支持爬虫、API、RSS等多种数据源的统一管理
"""
import asyncio
import logging
from typing import List, Dict, Any, Optional, Set
from datetime import datetime, timedelta
from enum import Enum
import hashlib
import json

# 导入所有数据源
from ..spiders.weibo_hot_spider import WeiboHotSpider
from ..spiders.zhihu_hot_spider import ZhihuHotSpider
from ..spiders.baidu_hot_spider import BaiduHotSpider
from ..spiders.toutiao_hot_spider import ToutiaoHotSpider
from ..spiders.bilibili_hot_spider import BilibiliHotSpider
from ..spiders.douyin_hot_spider import DouyinHotSpider
from ..spiders.xiaohongshu_hot_spider import XiaohongshuHotSpider

from .newsapi_aggregator import NewsAPIAggregator
from .rsshub_aggregator import RSSHubAggregator
from .rss_hot_service import RSSHotService
from .tophub_service import TophubService

logger = logging.getLogger(__name__)


class DataSourceType(Enum):
    """Kinds of data sources the manager can drive."""
    CRAWLER = "crawler"      # web crawler / spider
    API = "api"              # HTTP API endpoint
    RSS = "rss"              # RSS feed subscription
    RSSHUB = "rsshub"        # RSSHub aggregator
    CUSTOM = "custom"        # user-defined source


class DataSourceStatus(Enum):
    """Health state of a single data source."""
    ACTIVE = "active"        # healthy, last fetch succeeded
    INACTIVE = "inactive"    # disabled by operator
    ERROR = "error"         # last fetch raised an exception
    RATE_LIMITED = "rate_limited"  # throttled by the upstream service


class UnifiedDataSourceManager:
    """Unified data source manager.

    Registers every known data source (crawlers, HTTP APIs, RSS/RSSHub
    aggregators), tracks per-source status and metrics, caches results in
    memory, and exposes a single async interface for fetching, aggregating,
    searching and deduplicating hot-topic items.
    """

    def __init__(self):
        """Initialize registries, metrics and the in-memory cache."""
        self.sources = {}         # {group: {source_name: source_info}}
        self.source_status = {}   # {source_name: DataSourceStatus}
        self.source_metrics = {}  # {source_name: success/error/timing stats}
        self.cache = {}           # {cache_key: (data, fetched_at datetime)}
        self.cache_ttl = 3600     # cache lifetime in seconds (1 hour)

        # Register all built-in data sources.
        self._initialize_sources()

    def _initialize_sources(self):
        """Register every built-in data source and reset status/metrics."""

        # Crawler-based sources.
        self.sources['crawlers'] = {
            'weibo': {
                'type': DataSourceType.CRAWLER,
                'instance': WeiboHotSpider,
                # Disabled: Weibo is served via the Tophub API instead.
                'config': {'enabled': False, 'priority': 1},
                'categories': ['social', 'trending']
            },
            'zhihu': {
                'type': DataSourceType.CRAWLER,
                'instance': ZhihuHotSpider,
                'config': {'enabled': True, 'priority': 1},
                'categories': ['knowledge', 'trending']
            },
            'baidu': {
                'type': DataSourceType.CRAWLER,
                'instance': BaiduHotSpider,
                'config': {'enabled': True, 'priority': 2},
                'categories': ['search', 'trending']
            },
            'toutiao': {
                'type': DataSourceType.CRAWLER,
                'instance': ToutiaoHotSpider,
                'config': {'enabled': True, 'priority': 2},
                'categories': ['news', 'trending']
            },
            'bilibili': {
                'type': DataSourceType.CRAWLER,
                'instance': BilibiliHotSpider,
                'config': {'enabled': True, 'priority': 3},
                'categories': ['video', 'entertainment']
            },
            'douyin': {
                'type': DataSourceType.CRAWLER,
                'instance': DouyinHotSpider,
                'config': {'enabled': True, 'priority': 3},
                'categories': ['video', 'entertainment']
            },
            'xiaohongshu': {
                'type': DataSourceType.CRAWLER,
                'instance': XiaohongshuHotSpider,
                'config': {'enabled': True, 'priority': 3},
                'categories': ['lifestyle', 'trending']
            }
        }

        # API-based sources.
        self.sources['apis'] = {
            'tophub': {
                'type': DataSourceType.API,
                'instance': TophubService,
                'config': {'enabled': True, 'priority': 0},  # highest priority
                'categories': ['all', 'trending']
            },
            'newsapi': {
                'type': DataSourceType.API,
                'instance': NewsAPIAggregator,
                'config': {'enabled': True, 'priority': 1},
                'categories': ['news', 'international']
            },
            'rss_service': {
                'type': DataSourceType.API,
                'instance': RSSHotService,
                'config': {'enabled': True, 'priority': 2},
                'categories': ['tech', 'community']
            }
        }

        # RSSHub sources.
        self.sources['rsshub'] = {
            'rsshub': {
                'type': DataSourceType.RSSHUB,
                'instance': RSSHubAggregator,
                'config': {'enabled': True, 'priority': 2},
                'categories': ['all']
            }
        }

        # Every source starts out ACTIVE with zeroed metrics.
        for group in self.sources.values():
            for name in group:
                self.source_status[name] = DataSourceStatus.ACTIVE
                self.source_metrics[name] = {
                    'success_count': 0,
                    'error_count': 0,
                    'last_fetch': None,
                    'avg_response_time': 0
                }

    def _find_source(self, source_name: str) -> Optional[Dict[str, Any]]:
        """Return the info dict for *source_name* across all groups, or None."""
        for group in self.sources.values():
            if source_name in group:
                return group[source_name]
        return None

    async def fetch_from_source(self, source_name: str) -> List[Dict[str, Any]]:
        """Fetch data from a single named source.

        Serves from the in-memory cache while it is fresh; otherwise hits
        the source, updates its metrics/status and refreshes the cache.

        Args:
            source_name: registered source name (e.g. 'zhihu', 'tophub')

        Returns:
            List of normalized item dicts; empty list for unknown,
            disabled or failing sources.
        """
        source_info = self._find_source(source_name)
        if not source_info:
            logger.error(f"Unknown source: {source_name}")
            return []

        if not source_info['config']['enabled']:
            logger.info(f"Source {source_name} is disabled")
            return []

        # Serve cached data while it is within the TTL.
        cache_key = f"{source_name}:latest"
        if cache_key in self.cache:
            cached_data, cache_time = self.cache[cache_key]
            # BUGFIX: use total_seconds() — timedelta.seconds wraps every
            # 24h and would treat a day-old cache entry as fresh.
            if (datetime.utcnow() - cache_time).total_seconds() < self.cache_ttl:
                logger.info(f"Using cached data for {source_name}")
                return cached_data

        start_time = datetime.utcnow()
        try:
            data = await self._fetch_data(source_name, source_info)

            # Record the success and clear any lingering ERROR state so a
            # recovered source shows up as ACTIVE again.
            metrics = self.source_metrics[source_name]
            metrics['success_count'] += 1
            metrics['last_fetch'] = datetime.utcnow()
            self.source_status[source_name] = DataSourceStatus.ACTIVE

            # Incremental running average of response time.
            response_time = (datetime.utcnow() - start_time).total_seconds()
            count = metrics['success_count']
            metrics['avg_response_time'] = (
                (metrics['avg_response_time'] * (count - 1) + response_time) / count
            )

            # Refresh the cache with the new payload.
            self.cache[cache_key] = (data, datetime.utcnow())

            return data

        except Exception as e:
            logger.error(f"Error fetching from {source_name}: {e}")
            self.source_metrics[source_name]['error_count'] += 1
            self.source_status[source_name] = DataSourceStatus.ERROR
            return []

    async def _fetch_data(self, source_name: str, source_info: Dict) -> List[Dict[str, Any]]:
        """Dispatch the actual fetch to the right adapter for the source type.

        Args:
            source_name: registered source name
            source_info: the source's registry entry (type/instance/config)

        Returns:
            List of normalized item dicts (empty for unsupported types).
        """
        source_type = source_info['type']
        source_class = source_info['instance']

        if source_type == DataSourceType.CRAWLER:
            # Crawler sources: async context manager exposing crawl_hot().
            async with source_class() as spider:
                data = await spider.crawl_hot()
                return self._normalize_data(data, source_name)

        elif source_type == DataSourceType.API:
            # API sources each have a slightly different client interface.
            if source_name == 'tophub':
                async with source_class() as service:
                    # Multi-platform hot lists, keyed by platform.
                    multi_data = await service.get_multi_platform_hot()
                    all_data = []
                    for platform, items in multi_data.items():
                        all_data.extend(items)
                    # Tophub already returns the normalized format.
                    return all_data
            elif source_name == 'newsapi':
                async with source_class() as aggregator:
                    data = await aggregator.get_trending_topics(50)
                    return self._normalize_data(data, source_name)
            elif source_name == 'rss_service':
                async with source_class() as service:
                    results = await service.fetch_all()
                    all_data = []
                    for items in results.values():
                        all_data.extend(items)
                    return self._normalize_data(all_data, source_name)

        elif source_type == DataSourceType.RSSHUB:
            # RSSHub aggregator source.
            async with source_class() as aggregator:
                data = await aggregator.get_trending_topics(50)
                return self._normalize_data(data, source_name)

        return []

    def _normalize_data(self, data: List[Dict], source_name: str) -> List[Dict[str, Any]]:
        """Normalize raw items from any source into the common schema.

        Args:
            data: raw item dicts from a source
            source_name: name of the originating source

        Returns:
            List of dicts with a stable id, source tag and unified keys.
        """
        normalized = []

        for item in data:
            # Skip None/empty entries a source may emit.
            if not item:
                continue

            # Deterministic ID from title + URL + source name.
            content = f"{item.get('title', '')}{item.get('url', '')}{source_name}"
            item_id = hashlib.md5(content.encode()).hexdigest()

            normalized_item = {
                'id': item_id,
                'source': source_name,
                'title': item.get('title', ''),
                'description': item.get('description', '') or item.get('excerpt', ''),
                'url': item.get('url', ''),
                'author': item.get('author', ''),
                # Sources disagree on the timestamp key; take the first hit.
                'published_at': item.get('published_at') or item.get('publishedAt') or item.get('fetch_time'),
                'heat_value': item.get('heat_value') or item.get('heat', 0),
                'rank': item.get('rank', 0),
                'category': item.get('category', ''),
                'tags': item.get('tags', []),
                'metadata': {
                    'original_source': item.get('platform') or item.get('source', source_name),
                    'fetch_time': datetime.utcnow().isoformat() + 'Z'
                }
            }

            normalized.append(normalized_item)

        return normalized

    @staticmethod
    def _parse_heat(value: Any) -> int:
        """Convert a heat value like 385, '12万' or '1.5亿' into an int.

        Returns 0 for values that cannot be parsed instead of raising, so
        sorting never crashes on malformed source data (e.g. '3.5万',
        which the old digit-substitution approach could not handle).
        """
        try:
            text = str(value).strip()
            multiplier = 1
            if text.endswith('亿'):
                multiplier = 100_000_000
                text = text[:-1]
            elif text.endswith('万'):
                multiplier = 10_000
                text = text[:-1]
            return int(float(text) * multiplier)
        except (ValueError, TypeError):
            return 0

    async def fetch_all_sources(self, categories: Optional[List[str]] = None) -> Dict[str, List[Dict[str, Any]]]:
        """Fetch from every enabled source concurrently.

        Args:
            categories: restrict to sources matching any of these
                categories; None fetches from all enabled sources.

        Returns:
            Mapping of source name to its item list (empty on failure).
        """
        results = {}
        tasks = []
        source_names = []

        # Select the sources to query.
        for group in self.sources.values():
            for name, info in group.items():
                if not info['config']['enabled']:
                    continue

                if categories:
                    # Keep the source only if any requested category matches.
                    source_categories = info.get('categories', [])
                    if not any(cat in source_categories for cat in categories):
                        continue

                tasks.append(self.fetch_from_source(name))
                source_names.append(name)

        # Run all fetches concurrently; one failure must not sink the rest.
        if tasks:
            task_results = await asyncio.gather(*tasks, return_exceptions=True)

            for name, result in zip(source_names, task_results):
                if isinstance(result, Exception):
                    logger.error(f"Error fetching from {name}: {result}")
                    results[name] = []
                else:
                    results[name] = result

        return results

    async def aggregate_by_category(self, category: str) -> List[Dict[str, Any]]:
        """Aggregate, deduplicate and rank items for one category.

        Args:
            category: category name (e.g. 'trending')

        Returns:
            Deduplicated items sorted hottest-first, ties broken by
            published time.
        """
        # Fetch from every source advertising this category.
        all_data = await self.fetch_all_sources(categories=[category])

        # Flatten per-source lists into one list.
        aggregated = []
        for source_data in all_data.values():
            aggregated.extend(source_data)

        unique_items = self._deduplicate(aggregated)

        # Sort by heat (descending) then published time. `or ''` guards
        # against items whose 'published_at' is present but None, which
        # would make str/None comparisons raise.
        unique_items.sort(
            key=lambda x: (
                -self._parse_heat(x.get('heat_value', 0)),
                x.get('published_at') or ''
            )
        )

        return unique_items

    def _deduplicate(self, items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Drop duplicate items, preserving first-seen order.

        Args:
            items: raw item list

        Returns:
            Items unique by id and by (case-folded) 30-char title prefix.
        """
        seen_ids = set()
        seen_titles = set()
        unique_items = []

        for item in items:
            # Exact duplicate (same id).
            if item['id'] in seen_ids:
                continue

            # Near-duplicate: same title prefix across sources.
            title_key = item['title'][:30].lower() if item.get('title') else ''
            if title_key and title_key in seen_titles:
                continue

            seen_ids.add(item['id'])
            if title_key:
                seen_titles.add(title_key)
            unique_items.append(item)

        return unique_items

    async def search_all_sources(self, keyword: str, limit: int = 50) -> List[Dict[str, Any]]:
        """Case-insensitive keyword search across all enabled sources.

        Args:
            keyword: search keyword
            limit: maximum number of results to return

        Returns:
            Up to *limit* deduplicated items whose title or description
            contains the keyword.
        """
        all_data = await self.fetch_all_sources()

        results = []
        keyword_lower = keyword.lower()

        for source_data in all_data.values():
            for item in source_data:
                if (keyword_lower in item.get('title', '').lower() or
                        keyword_lower in item.get('description', '').lower()):
                    results.append(item)

        unique_results = self._deduplicate(results)

        return unique_results[:limit]

    def get_source_status(self) -> Dict[str, Any]:
        """Snapshot the status, config and metrics of every source.

        Returns:
            Dict with per-source details under 'sources' and aggregate
            counts (total/active/error/disabled) under 'summary'.
        """
        status = {
            'sources': {},
            'summary': {
                'total': 0,
                'active': 0,
                'error': 0,
                'disabled': 0
            }
        }

        for group in self.sources.values():
            for name, info in group.items():
                status['sources'][name] = {
                    'type': info['type'].value,
                    'status': self.source_status[name].value,
                    'enabled': info['config']['enabled'],
                    'priority': info['config']['priority'],
                    'categories': info.get('categories', []),
                    'metrics': self.source_metrics[name]
                }

                status['summary']['total'] += 1
                if not info['config']['enabled']:
                    status['summary']['disabled'] += 1
                elif self.source_status[name] == DataSourceStatus.ACTIVE:
                    status['summary']['active'] += 1
                elif self.source_status[name] == DataSourceStatus.ERROR:
                    status['summary']['error'] += 1

        return status

    def enable_source(self, source_name: str):
        """Enable a data source and mark it ACTIVE."""
        info = self._find_source(source_name)
        if info is None:
            logger.warning(f"Source not found: {source_name}")
            return
        info['config']['enabled'] = True
        self.source_status[source_name] = DataSourceStatus.ACTIVE
        logger.info(f"Enabled source: {source_name}")

    def disable_source(self, source_name: str):
        """Disable a data source and mark it INACTIVE."""
        info = self._find_source(source_name)
        if info is None:
            logger.warning(f"Source not found: {source_name}")
            return
        info['config']['enabled'] = False
        self.source_status[source_name] = DataSourceStatus.INACTIVE
        logger.info(f"Disabled source: {source_name}")

    def clear_cache(self, source_name: Optional[str] = None):
        """Clear cached results.

        Args:
            source_name: clear only this source's cache entry; None
                clears everything.
        """
        if source_name:
            cache_key = f"{source_name}:latest"
            if cache_key in self.cache:
                del self.cache[cache_key]
                logger.info(f"Cleared cache for {source_name}")
        else:
            self.cache.clear()
            logger.info("Cleared all cache")


async def main():
    """测试统一数据源管理器"""
    logging.basicConfig(level=logging.INFO)
    
    manager = UnifiedDataSourceManager()
    
    # 获取数据源状态
    print("\n=== 数据源状态 ===")
    status = manager.get_source_status()
    print(f"总数据源: {status['summary']['total']}")
    print(f"活跃: {status['summary']['active']}")
    print(f"禁用: {status['summary']['disabled']}")
    print(f"错误: {status['summary']['error']}")
    
    # 测试从单个数据源获取数据
    print("\n=== 测试单个数据源 ===")
    weibo_data = await manager.fetch_from_source('weibo')
    print(f"微博热搜: {len(weibo_data)} 条")
    if weibo_data:
        print(f"示例: {weibo_data[0]['title']}")
    
    # 测试按分类聚合
    print("\n=== 按分类聚合 ===")
    trending_data = await manager.aggregate_by_category('trending')
    print(f"热门话题: {len(trending_data)} 条")
    for item in trending_data[:5]:
        print(f"- [{item['source']}] {item['title']}")
    
    # 测试搜索功能
    print("\n=== 搜索功能测试 ===")
    search_results = await manager.search_all_sources('AI', limit=10)
    print(f"搜索'AI'找到: {len(search_results)} 条")
    for item in search_results[:3]:
        print(f"- [{item['source']}] {item['title']}")
    
    # 测试获取所有数据源
    print("\n=== 获取所有数据源 ===")
    all_data = await manager.fetch_all_sources()
    for source, items in all_data.items():
        print(f"{source}: {len(items)} 条")
    
    return len(all_data) > 0


if __name__ == "__main__":
    result = asyncio.run(main())
    print(f"\n测试{'成功' if result else '失败'}")