import logging
from datetime import datetime, timedelta
from typing import Any, Dict, List

from pymongo import MongoClient

logger = logging.getLogger(__name__)

class DatabaseManager:
    def __init__(self, mongodb_url: str, database_name: str):
        self.client = MongoClient(mongodb_url)
        self.db = self.client[database_name]
        self.articles = self.db.articles
        self.analysis_results = self.db.analysis_results
        self.sentiments = self.db.sentiments
        self.topics = self.db.topics
        
    def save_article(self, article: Dict[str, Any]) -> Dict[str, Any] | None:
        """保存文章数据，返回 {'id': str, 'inserted': bool}"""
        try:
            existing = self.articles.find_one({'url': article['url'], 'title': article['title']})
            if existing:
                return {'id': str(existing.get('_id')), 'inserted': False}
            article['created_at'] = datetime.now()
            article['updated_at'] = datetime.now()
            result = self.articles.insert_one(article)
            return {'id': str(result.inserted_id), 'inserted': True}
        except Exception as e:
            logger.error(f"保存文章失败: {e}")
            return None
    
    def get_articles(self, limit: int = 100, skip: int = 0) -> List[Dict[str, Any]]:
        """获取文章列表"""
        try:
            cursor = self.articles.find().sort('created_at', -1).skip(skip).limit(limit)
            articles = []
            for doc in cursor:
                # 转换ObjectId为字符串
                doc['_id'] = str(doc['_id'])
                articles.append(doc)
            return articles
        except Exception as e:
            logger.error(f"获取文章失败: {e}")
            return []

    def count_articles(self) -> int:
        """统计文章总数"""
        try:
            return self.articles.count_documents({})
        except Exception as e:
            logger.error(f"统计文章总数失败: {e}")
            return 0
    
    def save_sentiment_analysis(self, article_id: str, sentiment_data: Dict[str, Any]) -> str:
        """保存情感分析结果"""
        try:
            sentiment_data['article_id'] = article_id
            sentiment_data['created_at'] = datetime.now()
            result = self.sentiments.insert_one(sentiment_data)
            return str(result.inserted_id)
        except Exception as e:
            logger.error(f"保存情感分析失败: {e}")
            return None
    
    def save_topic_analysis(self, topic_data: Dict[str, Any]) -> str:
        """保存主题分析结果"""
        try:
            topic_data['created_at'] = datetime.now()
            result = self.topics.insert_one(topic_data)
            return str(result.inserted_id)
        except Exception as e:
            logger.error(f"保存主题分析失败: {e}")
            return None
    
    def get_sentiment_stats(self, days: int = 7) -> Dict[str, Any]:
        """获取情感统计"""
        try:
            from datetime import timedelta
            start_date = datetime.now() - timedelta(days=days)
            # 统计口径：按文章去重，取每篇文章最新一次情感结果再做聚合
            pipeline = [
                {'$match': {'created_at': {'$gte': start_date}}},
                # 先按 article_id 升序、created_at 降序排序，便于 $group 取最新记录
                {'$sort': {'article_id': 1, 'created_at': -1}},
                # 以 article_id 分组，取每篇文章的最新情感标签与分数
                {'$group': {
                    '_id': '$article_id',
                    'sentiment_label': {'$first': '$sentiment_label'},
                    'sentiment_score': {'$first': '$sentiment_score'},
                    'created_at': {'$first': '$created_at'}
                }},
                # 基于“每篇文章的最新结果”做情感分布与平均分
                {'$group': {
                    '_id': '$sentiment_label',
                    'count': {'$sum': 1},
                    'avg_score': {'$avg': '$sentiment_score'}
                }},
                {'$sort': {'_id': 1}}
            ]

            results = list(self.sentiments.aggregate(pipeline))
            return results
        except Exception as e:
            logger.error(f"获取情感统计失败: {e}")
            return []
    
    def get_trend_data(self, days: int = 30) -> List[Dict[str, Any]]:
        """获取趋势数据"""
        try:
            from datetime import timedelta
            start_date = datetime.now() - timedelta(days=days)
            
            pipeline = [
                {'$match': {'created_at': {'$gte': start_date}}},
                {'$group': {
                    '_id': {
                        'year': {'$year': '$created_at'},
                        'month': {'$month': '$created_at'},
                        'day': {'$dayOfMonth': '$created_at'}
                    },
                    'count': {'$sum': 1},
                    'avg_sentiment': {'$avg': '$sentiment_score'}
                }},
                {'$sort': {'_id': 1}}
            ]
            
            results = list(self.sentiments.aggregate(pipeline))
            return results
        except Exception as e:
            logger.error(f"获取趋势数据失败: {e}")
            return []

    def get_daily_sentiment_breakdown(self, days: int = 30) -> List[Dict[str, Any]]:
        """按天统计正/负/中数量"""
        try:
            from datetime import timedelta
            start_date = datetime.now() - timedelta(days=days)
            pipeline = [
                {'$match': {'created_at': {'$gte': start_date}}},
                {'$group': {
                    '_id': {
                        'year': {'$year': '$created_at'},
                        'month': {'$month': '$created_at'},
                        'day': {'$dayOfMonth': '$created_at'},
                        'label': '$sentiment_label'
                    },
                    'count': {'$sum': 1}
                }},
                {'$sort': {'_id.year': 1, '_id.month': 1, '_id.day': 1}}
            ]
            agg = list(self.sentiments.aggregate(pipeline))
            # 折叠为每日日汇总
            tmp = {}
            for row in agg:
                key = (row['_id']['year'], row['_id']['month'], row['_id']['day'])
                date_str = f"{row['_id']['year']}-{row['_id']['month']:02d}-{row['_id']['day']:02d}"
                if key not in tmp:
                    tmp[key] = {'date': date_str, '正面': 0, '负面': 0, '中性': 0}
                label = row['_id'].get('label') or '中性'
                if label not in tmp[key]:
                    tmp[key][label] = 0
                tmp[key][label] += row['count']
            # 转列表并按日期排序
            values = list(tmp.values())
            values.sort(key=lambda x: x['date'])
            return values
        except Exception as e:
            logger.error(f"获取按天情感分布失败: {e}")
            return []

    def get_top_keywords(self, days: int = 30, limit: int = 50) -> List[Dict[str, Any]]:
        """根据情感分析结果的关键词统计热度"""
        try:
            from datetime import timedelta
            start_date = datetime.now() - timedelta(days=days)
            pipeline = [
                {'$match': {'created_at': {'$gte': start_date}}},
                {'$unwind': {'path': '$keywords', 'preserveNullAndEmptyArrays': False}},
                {'$match': {'keywords': {'$ne': ''}}},
                {'$group': {'_id': '$keywords', 'count': {'$sum': 1}}},
                {'$sort': {'count': -1}},
                {'$limit': int(limit)}
            ]
            res = list(self.sentiments.aggregate(pipeline))
            return [{
                'keyword': r['_id'],
                'count': r['count']
            } for r in res if r.get('_id')]
        except Exception as e:
            logger.error(f"获取热门关键词失败: {e}")
            return []
    
    def close(self):
        """关闭数据库连接"""
        self.client.close()
