import logging
from typing import List, Dict, Tuple
from datetime import datetime, timedelta
from collections import Counter
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import DBSCAN

from ..extensions import db
from app.models import Article, SocialMediaPost, Keyword

class HotspotDetector:
    """Detect hot topics, clustered events and trending keywords from
    recently collected articles and social-media posts.

    Relies on the project's ``Article``, ``SocialMediaPost`` and ``Keyword``
    SQLAlchemy models. All public methods return plain list/dict structures
    ready for JSON serialization and fall back to ``[]`` on any error.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # Shared across calls; cluster_articles() calls fit_transform(), so
        # the vocabulary is re-fitted on every invocation.
        self.tfidf_vectorizer = TfidfVectorizer(max_features=1000, stop_words='english')

    @staticmethod
    def _article_summary(article) -> Dict:
        """Build the lightweight article dict used in API responses.

        Shared by detect_hot_topics() and cluster_articles(), which
        previously duplicated this construction.
        """
        return {
            'id': article.id,
            'title': article.title,
            'url': article.url,
            'source': article.source,
            'sentiment': article.sentiment,
            'published_at': article.published_at.isoformat() if article.published_at else None
        }

    def detect_hot_topics(self, days: int = 3, min_articles: int = 5) -> List[Dict]:
        """Detect hot topics among recently collected articles.

        Args:
            days: Look-back window in days (filtered on ``collected_at``).
            min_articles: Minimum number of articles required before
                detection is attempted.

        Returns:
            Topic dicts (keyword, count, sample articles, totals, sentiment
            distribution) ordered by keyword frequency; ``[]`` when there is
            too little data or on error.
        """
        try:
            # NOTE(review): datetime.now() is naive — assumes collected_at is
            # stored as naive local time; confirm against the collector.
            start_date = datetime.now() - timedelta(days=days)
            articles = Article.query.filter(Article.collected_at >= start_date).all()

            if len(articles) < min_articles:
                self.logger.warning(f"文章数量不足，无法检测热门话题: {len(articles)} < {min_articles}")
                return []

            # Extract each article's keywords exactly once; the previous code
            # re-called get_keywords() for every (top keyword, article) pair.
            per_article = [(article, article.get_keywords()) for article in articles]

            # Aggregate keyword frequencies across all articles.
            keyword_counter = Counter()
            for _, keywords in per_article:
                keyword_counter.update(keywords)

            top_keywords = keyword_counter.most_common(20)

            hot_topics = []
            for keyword, count in top_keywords:
                # Articles whose keyword list contains this keyword.
                related_articles = [
                    self._article_summary(article)
                    for article, keywords in per_article
                    if keyword in keywords
                ]

                # Sentiment distribution. Unknown or missing sentiment values
                # (e.g. sentiment is None) are counted as neutral instead of
                # raising KeyError, which previously aborted the detection.
                sentiment_counts = {'正面': 0, '负面': 0, '中性': 0}
                for item in related_articles:
                    sentiment = item.get('sentiment')
                    if sentiment not in sentiment_counts:
                        sentiment = '中性'
                    sentiment_counts[sentiment] += 1

                hot_topics.append({
                    'keyword': keyword,
                    'count': count,
                    'articles': related_articles[:5],  # cap payload at 5 articles
                    'total_articles': len(related_articles),
                    'sentiment_distribution': sentiment_counts
                })

            return hot_topics

        except Exception as e:
            self.logger.error("检测热门话题失败: %s", e)
            return []

    def cluster_articles(self, days: int = 3) -> List[Dict]:
        """Cluster recent articles with TF-IDF + DBSCAN to surface events.

        Args:
            days: Look-back window in days (filtered on ``collected_at``).

        Returns:
            Cluster dicts (id, size, top keywords, sample articles) sorted by
            cluster size, largest first; ``[]`` on too little data or error.
        """
        try:
            start_date = datetime.now() - timedelta(days=days)
            articles = Article.query.filter(Article.collected_at >= start_date).all()

            if len(articles) < 10:
                self.logger.warning(f"文章数量不足，无法进行聚类: {len(articles)} < 10")
                return []

            # Guard against NULL title/content, which would raise TypeError
            # on plain string concatenation.
            article_contents = [
                f"{article.title or ''} {article.content or ''}" for article in articles
            ]
            article_ids = [article.id for article in articles]

            # TF-IDF vectorization (sparse matrix; DBSCAN's cosine metric
            # accepts sparse input).
            tfidf_matrix = self.tfidf_vectorizer.fit_transform(article_contents)

            dbscan = DBSCAN(eps=0.5, min_samples=3, metric='cosine')
            labels = dbscan.fit_predict(tfidf_matrix)

            # Group article ids by cluster label, dropping noise points (-1).
            # 'member_ids' deliberately does not reuse the name 'article_ids',
            # which the previous loop variable clobbered.
            cluster_members = {}
            for idx, label in enumerate(labels):
                if label == -1:
                    continue
                cluster_members.setdefault(label, []).append(article_ids[idx])

            results = []
            for label, member_ids in cluster_members.items():
                member_articles = Article.query.filter(Article.id.in_(member_ids)).all()

                # The cluster's most frequent keywords serve as its title.
                cluster_keywords = []
                for article in member_articles:
                    cluster_keywords.extend(article.get_keywords())
                top_keywords = Counter(cluster_keywords).most_common(5)

                results.append({
                    # Cast numpy integer label to a JSON-serializable int.
                    'cluster_id': int(label),
                    'size': len(member_ids),
                    'keywords': [kw for kw, _ in top_keywords],
                    'articles': [self._article_summary(a) for a in member_articles[:5]]  # cap at 5
                })

            # Largest clusters first.
            results.sort(key=lambda c: c['size'], reverse=True)

            return results

        except Exception as e:
            self.logger.error("文章聚类失败: %s", e)
            return []

    def get_trending_keywords(self, days: int = 7, top_k: int = 20) -> List[Dict]:
        """Return the most frequent recently-updated keywords.

        Args:
            days: Look-back window in days (filtered on ``last_updated``).
            top_k: Maximum number of keywords to return.

        Returns:
            Keyword dicts with overall frequency plus article / social-post
            match counts; ``[]`` on error.
        """
        try:
            start_date = datetime.now() - timedelta(days=days)
            keywords = (Keyword.query
                        .filter(Keyword.last_updated >= start_date)
                        .order_by(Keyword.frequency.desc())
                        .limit(top_k)
                        .all())

            trending_keywords = []
            for keyword in keywords:
                # Substring LIKE match against the serialized keyword columns.
                # NOTE(review): words containing '%' or '_' act as wildcards
                # here — acceptable for keyword tokens, but worth escaping if
                # keywords can contain punctuation. A real trend score would
                # need time-bucketed statistics; this is a simplification.
                pattern = f'%{keyword.word}%'
                article_count = Article.query.filter(Article.keywords.like(pattern)).count()
                social_post_count = SocialMediaPost.query.filter(SocialMediaPost.keywords.like(pattern)).count()

                trending_keywords.append({
                    'keyword': keyword.word,
                    'frequency': keyword.frequency,
                    'article_count': article_count,
                    'social_post_count': social_post_count,
                    # last_updated is non-NULL here due to the >= filter, but
                    # stay defensive in case the query changes.
                    'last_updated': keyword.last_updated.isoformat() if keyword.last_updated else None
                })

            return trending_keywords

        except Exception as e:
            self.logger.error("获取趋势关键词失败: %s", e)
            return []

    def get_hot_events(self, days: int = 3) -> List[Dict]:
        """Combine hot topics and article clusters into one event feed.

        Args:
            days: Look-back window in days, forwarded to both detectors.

        Returns:
            Event dicts of type 'topic' or 'cluster', sorted descending by
            article count (topics) or cluster size (clusters); ``[]`` on error.
        """
        try:
            hot_topics = self.detect_hot_topics(days=days)
            clusters = self.cluster_articles(days=days)

            hot_events = []

            # Top 10 keyword-based topics.
            for topic in hot_topics[:10]:
                hot_events.append({
                    'type': 'topic',
                    'title': topic['keyword'],
                    'count': topic['count'],
                    'articles': topic['articles'],
                    'sentiment_distribution': topic['sentiment_distribution']
                })

            # Top 5 clusters; the three most common keywords form the title.
            for cluster in clusters[:5]:
                hot_events.append({
                    'type': 'cluster',
                    'title': ' '.join(cluster['keywords'][:3]),
                    'keywords': cluster['keywords'],
                    'size': cluster['size'],
                    'articles': cluster['articles']
                })

            # Rank by 'count' (topics) or 'size' (clusters); equivalent to the
            # previous "'count' in x" branch, in a single expression.
            hot_events.sort(key=lambda e: e.get('count', e.get('size', 0)), reverse=True)

            return hot_events

        except Exception as e:
            self.logger.error("获取热点事件失败: %s", e)
            return []