"""MongoDB存储管道"""
import logging
from typing import Dict, Any, List, Optional
from datetime import datetime, timedelta
from motor.motor_asyncio import AsyncIOMotorClient
import asyncio
from pathlib import Path

logger = logging.getLogger(__name__)


class MongoDBPipeline:
    """MongoDB storage pipeline for scraped hot-topic data.

    Buffers incoming items and writes them to MongoDB in bulk upserts,
    optionally running each item through a data-cleaning pipeline first.
    Connection is established lazily on first use.
    """

    def __init__(
        self,
        mongodb_url: str = "mongodb://localhost:27017",
        database: str = "aipaper",
        collection: str = "hot_topics",
        enable_cleaning: bool = True,
        cleaning_config_path: Optional[str] = None
    ):
        """Initialize the MongoDB pipeline.

        Args:
            mongodb_url: MongoDB connection URL.
            database: Database name.
            collection: Collection name.
            enable_cleaning: Whether to enable data cleaning.
            cleaning_config_path: Path to the cleaning config file;
                defaults to ``../config/cleaning_config.yaml`` relative
                to this module when not provided.
        """
        self.mongodb_url = mongodb_url
        self.database_name = database
        self.collection_name = collection
        self.client = None
        self.db = None
        self.collection = None
        self.batch_size = 100  # number of buffered items per bulk write
        self.buffer: List[Dict[str, Any]] = []  # pending items awaiting flush

        # Cleaning pipeline is best-effort: stays None if unavailable.
        self.cleaning_pipeline = None
        if enable_cleaning:
            self._init_cleaning_pipeline(cleaning_config_path)

    async def connect(self):
        """Connect to MongoDB lazily and ensure indexes exist (idempotent)."""
        if not self.client:
            self.client = AsyncIOMotorClient(self.mongodb_url)
            self.db = self.client[self.database_name]
            self.collection = self.db[self.collection_name]

            # Index creation is a no-op when the indexes already exist.
            await self._create_indexes()

            logger.info(f"连接MongoDB成功: {self.database_name}.{self.collection_name}")

    async def close(self):
        """Flush any pending items, then close the client connection."""
        # Flush remaining buffered items. The buffer can only be
        # non-empty after a successful connect(), but guard anyway so
        # close() never raises on an unconnected pipeline.
        if self.buffer and self.collection is not None:
            await self._flush_buffer()

        if self.client:
            self.client.close()
            self.client = None
            logger.info("MongoDB连接已关闭")

    async def _create_indexes(self):
        """Create collection indexes; failures are logged, not raised.

        NOTE: ``background=True`` is ignored by MongoDB 4.2+ (all index
        builds use the optimized build process) but is harmless to pass.
        """
        try:
            # Unique index on url_hash — the deduplication key for upserts.
            await self.collection.create_index(
                [("url_hash", 1)],
                unique=True,
                background=True,
                name="idx_url_hash"
            )

            # Compound index: source + fetch time, for per-source recency queries.
            await self.collection.create_index(
                [("source", 1), ("fetch_time", -1)],
                background=True,
                name="idx_source_time"
            )

            # Heat-value index (descending) for ranking queries.
            await self.collection.create_index(
                [("heat_value", -1)],
                background=True,
                name="idx_heat_value"
            )

            # Rank index.
            await self.collection.create_index(
                [("rank", 1)],
                background=True,
                name="idx_rank"
            )

            # TTL index: documents are removed once their expire_at
            # datetime passes (expire_at is set 30 days ahead in
            # _prepare_item; expireAfterSeconds=0 means "at expire_at").
            await self.collection.create_index(
                [("expire_at", 1)],
                expireAfterSeconds=0,
                background=True,
                name="idx_expire_ttl"
            )

            # Text index backing search_items().
            await self.collection.create_index(
                [("title", "text"), ("content.description", "text")],
                background=True,
                name="idx_text_search"
            )

            logger.info("MongoDB索引创建成功")

        except Exception as e:
            # Index failures should not abort ingestion; log and continue.
            logger.error(f"创建索引失败: {e}")

    def _init_cleaning_pipeline(self, config_path: Optional[str] = None):
        """Initialize the optional cleaning pipeline.

        Args:
            config_path: Path to the cleaning config file; a default
                path inside the package is used when omitted.
        """
        try:
            # Imported lazily so the pipeline still works when the
            # cleaning package is not installed/available.
            from ..cleaning.pipeline import CleaningPipeline

            if not config_path:
                config_path = Path(__file__).parent.parent / 'config' / 'cleaning_config.yaml'
                config_path = str(config_path)

            self.cleaning_pipeline = CleaningPipeline(config_path)
            logger.info(f"Cleaning pipeline initialized with config: {config_path}")

        except ImportError as e:
            logger.warning(f"Cleaning modules not available: {e}")
            self.cleaning_pipeline = None
        except Exception as e:
            logger.error(f"Failed to initialize cleaning pipeline: {e}")
            self.cleaning_pipeline = None

    async def process_item(self, item: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Process a single item: normalize it and queue it for writing.

        Args:
            item: Raw scraped item.

        Returns:
            The prepared item, or None if processing failed.
        """
        try:
            await self.connect()

            # Fill in derived/required fields and run cleaning.
            item = self._prepare_item(item)

            self.buffer.append(item)

            # Flush once the buffer reaches the batch size.
            if len(self.buffer) >= self.batch_size:
                await self._flush_buffer()

            return item

        except Exception as e:
            logger.error(f"处理数据项失败: {e}")
            return None

    def _prepare_item(self, item: Dict[str, Any]) -> Dict[str, Any]:
        """Prepare an item for storage (mutates and returns it).

        Ensures url_hash, timestamps, expiry, source, and caps the
        comment list; then applies the cleaning pipeline if enabled.

        Args:
            item: Raw item dict.

        Returns:
            The prepared item.
        """
        # Derive the dedup key from the URL when absent. MD5 is used
        # purely as a dedup fingerprint, not for security.
        if 'url_hash' not in item and 'url' in item:
            import hashlib
            item['url_hash'] = hashlib.md5(item['url'].encode()).hexdigest()

        # Timestamps are stored as ISO-8601 strings with a 'Z' suffix.
        # NOTE: datetime.utcnow() is deprecated in Python 3.12; kept for
        # compatibility with the stored string format.
        now = datetime.utcnow()
        if 'fetch_time' not in item:
            item['fetch_time'] = now.isoformat() + 'Z'

        if 'update_time' not in item:
            item['update_time'] = now.isoformat() + 'Z'

        # Expiry drives the TTL index: documents auto-delete after 30 days.
        if 'expire_at' not in item:
            expire_date = now + timedelta(days=30)
            item['expire_at'] = expire_date

        # Default source.
        if 'source' not in item:
            item['source'] = 'weibo'

        # Cap embedded comments to keep document size bounded.
        if 'content' in item and 'comments' in item['content']:
            item['content']['comments'] = item['content']['comments'][:20]

        # Cleaning is best-effort: errors are logged, the raw item kept.
        if self.cleaning_pipeline:
            try:
                item = self.cleaning_pipeline.process(item)
                logger.debug("Data cleaning completed")
            except Exception as e:
                logger.error(f"Cleaning pipeline error: {e}")

        return item

    async def _flush_buffer(self):
        """Flush the buffer to MongoDB with one unordered bulk upsert."""
        if not self.buffer:
            return

        try:
            # BUG FIX: bulk_write() requires pymongo operation objects
            # (UpdateOne, InsertOne, ...); the previous raw-dict form
            # raised TypeError and silently dropped every batch.
            # pymongo is a hard dependency of motor, so it is available.
            from pymongo import UpdateOne

            operations = []
            for item in self.buffer:
                url_hash = item.get('url_hash')
                if not url_hash:
                    # No dedup key (item had no 'url' either): skip it
                    # instead of failing the whole batch with KeyError.
                    logger.warning("跳过缺少url_hash的数据项")
                    continue
                # Upsert: update the existing document or insert a new one.
                operations.append(UpdateOne(
                    {'url_hash': url_hash},
                    {'$set': item},
                    upsert=True
                ))

            if operations:
                # ordered=False lets valid operations proceed even if
                # some fail (e.g. duplicate-key races).
                result = await self.collection.bulk_write(operations, ordered=False)

                logger.info(
                    f"批量写入完成: "
                    f"插入{result.upserted_count}条, "
                    f"更新{result.modified_count}条"
                )

            self.buffer.clear()

        except Exception as e:
            logger.error(f"批量写入失败: {e}")
            # Drop the failed batch so one bad batch cannot wedge the
            # pipeline; a retry/dead-letter strategy could go here.
            self.buffer.clear()

    async def process_batch(self, items: List[Dict[str, Any]]) -> int:
        """Process a list of items and flush whatever remains buffered.

        Args:
            items: Items to process.

        Returns:
            Number of items processed successfully.
        """
        success_count = 0

        for item in items:
            result = await self.process_item(item)
            if result:
                success_count += 1

        # Final flush so partial batches are not left behind.
        await self._flush_buffer()

        return success_count

    async def get_latest_items(
        self,
        source: str = "weibo",
        limit: int = 50
    ) -> List[Dict[str, Any]]:
        """Fetch the most recent items for a source.

        Args:
            source: Data source name.
            limit: Maximum number of items to return.

        Returns:
            Items sorted by fetch_time, newest first (without _id).
        """
        await self.connect()

        cursor = self.collection.find(
            {"source": source},
            {"_id": 0}  # exclude the internal _id field
        ).sort("fetch_time", -1).limit(limit)

        items = []
        async for doc in cursor:
            items.append(doc)

        return items

    async def get_statistics(self) -> Dict[str, Any]:
        """Collect storage statistics.

        Returns:
            Dict with total/today record counts, per-source counts,
            last update time, and collection/database names.
        """
        await self.connect()

        total_count = await self.collection.count_documents({})

        # Today's records. fetch_time is stored as an ISO string, so
        # this relies on lexicographic comparison of ISO-8601 strings.
        today_start = datetime.utcnow().replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        today_count = await self.collection.count_documents({
            "fetch_time": {"$gte": today_start.isoformat()}
        })

        # Per-source record counts via aggregation.
        pipeline = [
            {
                "$group": {
                    "_id": "$source",
                    "count": {"$sum": 1}
                }
            }
        ]

        source_stats = {}
        async for doc in self.collection.aggregate(pipeline):
            source_stats[doc['_id']] = doc['count']

        # Most recent fetch_time across all documents.
        last_doc = await self.collection.find_one(
            {},
            sort=[("fetch_time", -1)]
        )
        last_update = last_doc.get('fetch_time') if last_doc else None

        return {
            'total_records': total_count,
            'today_records': today_count,
            'source_statistics': source_stats,
            'last_update': last_update,
            'collection': self.collection_name,
            'database': self.database_name
        }

    async def search_items(
        self,
        keyword: str,
        limit: int = 20
    ) -> List[Dict[str, Any]]:
        """Full-text search over titles and descriptions.

        Uses the text index created in _create_indexes, ranked by
        relevance score.

        Args:
            keyword: Search keyword(s).
            limit: Maximum number of results.

        Returns:
            Matching items, most relevant first (without _id).
        """
        await self.connect()

        cursor = self.collection.find(
            {"$text": {"$search": keyword}},
            {"_id": 0, "score": {"$meta": "textScore"}}
        ).sort([("score", {"$meta": "textScore"})]).limit(limit)

        items = []
        async for doc in cursor:
            items.append(doc)

        return items

    async def cleanup_old_data(self, days: int = 30):
        """Delete records older than the retention window.

        Complements the TTL index: compares the fetch_time ISO string
        lexicographically against the cutoff.

        Args:
            days: Retention period in days.

        Returns:
            Number of documents deleted.
        """
        await self.connect()

        cutoff_date = datetime.utcnow() - timedelta(days=days)

        result = await self.collection.delete_many({
            "fetch_time": {"$lt": cutoff_date.isoformat()}
        })

        logger.info(f"清理旧数据: 删除{result.deleted_count}条")

        return result.deleted_count