import asyncio
import logging
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
from database.connection_manager import connection_manager
from models.article import ArticleModel

logger = logging.getLogger(__name__)

class DatabaseHelper:
    """Async helper encapsulating MongoDB operations on the articles collection.

    Every public coroutine swallows database exceptions and returns a neutral
    value (``False`` / ``[]`` / ``{}`` / ``0``) instead of raising, reporting
    failures through the module logger. Callers therefore never need their own
    try/except around these methods.
    """

    def __init__(self):
        # Name of the MongoDB collection all methods operate on.
        self.collection_name = "articles"

    async def save_article(self, article: ArticleModel) -> bool:
        """Persist a single article, skipping URLs already stored.

        Args:
            article: Article model to store; ``article.url`` must be non-empty.

        Returns:
            bool: True when the document was inserted; False when the URL is
            empty, already present, or any database error occurred.
        """
        if not article.url:
            logger.error("文章URL不能为空")
            return False

        try:
            async with connection_manager.get_collection(self.collection_name) as collection:
                # NOTE(review): check-then-insert is racy under concurrent
                # writers — two tasks can both pass this check. A unique index
                # on "url" plus catching DuplicateKeyError would be atomic,
                # but pymongo is not importable here; confirm index exists.
                existing = await collection.find_one({"url": article.url})
                if existing:
                    logger.warning(f"文章URL已存在: {article.url}")
                    return False

                article_dict = article.dict(by_alias=True, exclude_unset=True)
                result = await collection.insert_one(article_dict)

                if result.inserted_id:
                    logger.info(f"文章保存成功，ID: {result.inserted_id}")
                    return True
                logger.error("文章保存失败，未获得插入ID")
                return False

        except Exception as e:
            logger.error(f"保存文章失败: {e}")
            return False

    async def batch_save_articles(self, articles: List[ArticleModel]) -> Dict[str, int]:
        """Save a batch of articles one by one, deduplicating by URL.

        Args:
            articles: Articles to store; may be empty.

        Returns:
            Dict[str, int]: Counters ``{"success", "failed", "skipped"}``.
            On a collection-level error the counts accumulated so far are
            returned rather than raised.
        """
        if not articles:
            return {"success": 0, "failed": 0, "skipped": 0}

        result_stats = {"success": 0, "failed": 0, "skipped": 0}

        try:
            async with connection_manager.get_collection(self.collection_name) as collection:
                for article in articles:
                    try:
                        # Skip URLs that are already stored (same race caveat
                        # as save_article — see NOTE there).
                        existing = await collection.find_one({"url": article.url})
                        if existing:
                            result_stats["skipped"] += 1
                            continue

                        article_dict = article.dict(by_alias=True, exclude_unset=True)
                        result = await collection.insert_one(article_dict)

                        if result.inserted_id:
                            result_stats["success"] += 1
                        else:
                            result_stats["failed"] += 1

                    except Exception as e:
                        # One bad article must not abort the rest of the batch.
                        logger.error(f"批量保存单篇文章失败 {article.url}: {e}")
                        result_stats["failed"] += 1

                logger.info(f"批量保存完成: {result_stats}")
                return result_stats

        except Exception as e:
            logger.error(f"批量保存操作失败: {e}")
            return result_stats

    async def get_articles_by_industry(self, industry_keyword: str, limit: int = 100, skip: int = 0) -> List[ArticleModel]:
        """Fetch successfully-crawled articles for one industry keyword.

        Args:
            industry_keyword: Exact value matched against ``industry_keyword``.
            limit: Maximum number of articles to return.
            skip: Number of matching documents to skip (for pagination).

        Returns:
            List[ArticleModel]: Articles sorted by ``crawl_time`` descending;
            empty on bad input or database error. Documents that fail model
            validation are logged and skipped, not raised.
        """
        if not industry_keyword:
            logger.error("行业关键词不能为空")
            return []

        try:
            async with connection_manager.get_collection(self.collection_name) as collection:
                cursor = collection.find(
                    {"industry_keyword": industry_keyword, "status": "success"}
                ).skip(skip).limit(limit).sort("crawl_time", -1)

                articles = []
                async for doc in cursor:
                    try:
                        articles.append(ArticleModel(**doc))
                    except Exception as e:
                        # Tolerate legacy/malformed documents instead of
                        # failing the whole query.
                        logger.error(f"解析文章数据失败: {e}")
                        continue

                logger.info(f"获取到{len(articles)}篇{industry_keyword}行业文章")
                return articles

        except Exception as e:
            logger.error(f"查询文章失败: {e}")
            return []

    @staticmethod
    async def _counts_from_cursor(cursor) -> Dict[str, int]:
        """Drain an aggregation cursor of ``{_id, count}`` docs into a dict."""
        stats: Dict[str, int] = {}
        async for doc in cursor:
            stats[doc["_id"]] = doc["count"]
        return stats

    async def get_article_statistics(self) -> Dict[str, Any]:
        """Compute collection-wide crawl statistics.

        Returns:
            Dict[str, Any]: total/success/failed counts, per-industry
            distribution, top-10 source domains, count for the last 7 days,
            and overall success rate (percent, 2 decimals). Empty dict on
            database error.
        """
        try:
            async with connection_manager.get_collection(self.collection_name) as collection:
                # Total documents vs. successfully crawled documents.
                total_count = await collection.count_documents({})
                success_count = await collection.count_documents({"status": "success"})

                # Distribution of successful articles per industry keyword.
                industry_pipeline = [
                    {"$match": {"status": "success"}},
                    {"$group": {"_id": "$industry_keyword", "count": {"$sum": 1}}},
                    {"$sort": {"count": -1}}
                ]
                industry_stats = await self._counts_from_cursor(
                    collection.aggregate(industry_pipeline)
                )

                # Top 10 source domains by successful article count.
                domain_pipeline = [
                    {"$match": {"status": "success"}},
                    {"$group": {"_id": "$source_domain", "count": {"$sum": 1}}},
                    {"$sort": {"count": -1}},
                    {"$limit": 10}
                ]
                domain_stats = await self._counts_from_cursor(
                    collection.aggregate(domain_pipeline)
                )

                # Successful crawls within the last 7 days.
                # NOTE(review): naive local time — assumes crawl_time is
                # stored naive in the same timezone; confirm.
                seven_days_ago = datetime.now() - timedelta(days=7)
                recent_count = await collection.count_documents({
                    "crawl_time": {"$gte": seven_days_ago},
                    "status": "success"
                })

                return {
                    "total_articles": total_count,
                    "success_articles": success_count,
                    "failed_articles": total_count - success_count,
                    "industry_distribution": industry_stats,
                    "top_domains": domain_stats,
                    "recent_7days": recent_count,
                    "success_rate": round((success_count / total_count * 100), 2) if total_count > 0 else 0
                }

        except Exception as e:
            logger.error(f"获取统计信息失败: {e}")
            return {}

    async def check_url_exists(self, url: str) -> bool:
        """Return True when an article with this URL is already stored.

        Args:
            url: URL to look up; empty/None short-circuits to False.

        Returns:
            bool: True if a matching document exists; False otherwise or on
            database error.
        """
        if not url:
            return False

        try:
            async with connection_manager.get_collection(self.collection_name) as collection:
                # Project only _id: existence is all we need, so avoid
                # transferring the full document.
                result = await collection.find_one({"url": url}, {"_id": 1})
                return result is not None

        except Exception as e:
            logger.error(f"检查URL失败: {e}")
            return False

    async def get_failed_urls(self, limit: int = 100) -> List[str]:
        """List URLs of documents whose crawl status is not "success".

        Args:
            limit: Maximum number of URLs to return.

        Returns:
            List[str]: Failed URLs; empty on database error. Documents
            missing the ``url`` field are skipped instead of aborting the
            whole query (previously ``doc["url"]`` raised KeyError and the
            broad handler discarded all results).
        """
        try:
            async with connection_manager.get_collection(self.collection_name) as collection:
                cursor = collection.find(
                    {"status": {"$ne": "success"}},
                    {"url": 1, "_id": 0}
                ).limit(limit)

                failed_urls = []
                async for doc in cursor:
                    url = doc.get("url")
                    if url:
                        failed_urls.append(url)

                return failed_urls

        except Exception as e:
            logger.error(f"获取失败URL列表失败: {e}")
            return []

    async def cleanup_old_articles(self, days: int = 30) -> int:
        """Delete articles crawled more than ``days`` days ago.

        Args:
            days: Retention window in days; must be positive.

        Returns:
            int: Number of deleted documents; 0 on bad input or error.
        """
        if days <= 0:
            logger.error("保留天数必须大于0")
            return 0

        try:
            # NOTE(review): naive local time — see get_article_statistics.
            cutoff_date = datetime.now() - timedelta(days=days)

            async with connection_manager.get_collection(self.collection_name) as collection:
                result = await collection.delete_many({
                    "crawl_time": {"$lt": cutoff_date}
                })

                deleted_count = result.deleted_count
                logger.info(f"清理了{deleted_count}篇{days}天前的文章")
                return deleted_count

        except Exception as e:
            logger.error(f"清理旧文章失败: {e}")
            return 0

# Module-level shared DatabaseHelper instance (import this, not the class).
db_helper = DatabaseHelper()