from django.db.models import Count, Avg, Max, Min, Q, F, Sum
from django.db.models.functions import TruncDate
from datetime import datetime, timedelta
from data_crawler.models import CrawledData
from collections import Counter
import pandas as pd
import jieba
import re

class WeiboAnalyzer:
    """Analytics helper for Weibo posts stored as ``CrawledData`` rows.

    Each row is assumed to hold JSON of the shape
    ``data = {'post': {..., 'engagement': {...}, 'media': {...}}, 'user': {...}}``
    and ``metadata = {'platform': 'weibo', 'keyword': ...}`` —
    TODO confirm against the crawler that writes these rows.
    """

    # Strip everything that is not a CJK Unified Ideograph (compiled once,
    # instead of once per analyze_content() call).
    _HAN_ONLY = re.compile(r'[^\u4e00-\u9fa5]')

    # Minimal Chinese stopword list; extend with a custom table as needed.
    _STOPWORDS = {'的', '了', '在', '是', '我', '有', '和', '就', '不', '人',
                  '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去',
                  '你', '会', '着', '没有', '看', '好', '自己', '这'}

    def __init__(self, task=None):
        """
        Initialize the analyzer.

        Args:
            task: optional CrawlerTask instance; when given, only that
                task's data is analyzed.
        """
        self.task = task
        # All query methods build on this platform-filtered queryset.
        self.base_queryset = CrawledData.objects.filter(
            metadata__platform='weibo'
        )
        if task:
            self.base_queryset = self.base_queryset.filter(task=task)

    def get_basic_stats(self):
        """Return overall counts, date range, and average engagement.

        Returns:
            dict with keys ``total_posts``, ``date_range`` (start/end),
            ``total_users``, and ``avg_engagement`` (reposts/comments/likes,
            rounded to 2 decimal places).
        """
        total_posts = self.base_queryset.count()
        if total_posts == 0:
            # Empty dataset: return a zeroed structure with the same shape.
            return {
                'total_posts': 0,
                'date_range': {'start': None, 'end': None},
                'total_users': 0,
                'avg_engagement': {'reposts': 0, 'comments': 0, 'likes': 0}
            }

        # Date range of the crawled posts (JSON path lookups).
        dates = self.base_queryset.aggregate(
            earliest=Min('data__post__created_at'),
            latest=Max('data__post__created_at')
        )

        # Number of distinct authors.
        unique_users = self.base_queryset.values('data__user__id').distinct().count()

        # Average engagement; Avg over all-missing values yields None,
        # normalized to 0 below via ``or 0``.
        engagement = self.base_queryset.aggregate(
            avg_reposts=Avg('data__post__engagement__reposts'),
            avg_comments=Avg('data__post__engagement__comments'),
            avg_likes=Avg('data__post__engagement__likes')
        )

        return {
            'total_posts': total_posts,
            'date_range': {
                'start': dates['earliest'],
                'end': dates['latest']
            },
            'total_users': unique_users,
            'avg_engagement': {
                'reposts': round(engagement['avg_reposts'] or 0, 2),
                'comments': round(engagement['avg_comments'] or 0, 2),
                'likes': round(engagement['avg_likes'] or 0, 2)
            }
        }

    def get_top_users(self, limit=10):
        """Return the ``limit`` most active users, ordered by post count.

        Each entry carries the user's id/name/follower count plus total
        reposts, comments, and likes across their posts.
        """
        return self.base_queryset.values(
            'data__user__id',
            'data__user__name',
            'data__user__followers_count'
        ).annotate(
            post_count=Count('id'),
            total_reposts=Sum('data__post__engagement__reposts'),
            total_comments=Sum('data__post__engagement__comments'),
            total_likes=Sum('data__post__engagement__likes')
        ).order_by('-post_count')[:limit]

    def get_keyword_stats(self):
        """Return per-crawl-keyword post counts and average engagement."""
        return self.base_queryset.values(
            'metadata__keyword'
        ).annotate(
            post_count=Count('id'),
            avg_reposts=Avg('data__post__engagement__reposts'),
            avg_comments=Avg('data__post__engagement__comments'),
            avg_likes=Avg('data__post__engagement__likes')
        ).order_by('-post_count')

    def get_daily_stats(self, days=30):
        """Return per-day post counts and engagement totals.

        Args:
            days: size of the trailing time window, in days.
        """
        # NOTE(review): naive local datetime; if USE_TZ is enabled, or if
        # created_at is stored as a string inside the JSON blob, this
        # comparison may misbehave — verify against the stored format.
        start_date = datetime.now() - timedelta(days=days)
        return self.base_queryset.filter(
            data__post__created_at__gte=start_date
        ).annotate(
            date=TruncDate('data__post__created_at')
        ).values('date').annotate(
            post_count=Count('id'),
            total_reposts=Sum('data__post__engagement__reposts'),
            total_comments=Sum('data__post__engagement__comments'),
            total_likes=Sum('data__post__engagement__likes')
        ).order_by('date')

    def get_popular_topics(self, limit=20):
        """Return the ``limit`` most frequent topics as (topic, count) pairs."""
        counts = Counter()
        for row in self.base_queryset.values('data__post__media__topics'):
            topics = row['data__post__media__topics']
            # JSON lookups yield None when the path is absent; the previous
            # code crashed on list.extend(None) here.
            if topics:
                counts.update(topics)
        return counts.most_common(limit)

    def analyze_content(self, limit=1000):
        """Tokenize up to ``limit`` post texts and count word frequencies.

        Returns:
            list of (word, count) pairs, most common first, with
            single-character tokens and stopwords removed.
        """
        texts = self.base_queryset.values_list(
            'data__post__content', flat=True
        )[:limit]

        words = []
        for text in texts:
            # Missing content comes back as None from the JSON lookup;
            # re.sub would raise TypeError on it.
            if not text:
                continue
            # Keep only Chinese characters, then segment with jieba.
            clean_text = self._HAN_ONLY.sub('', text)
            words.extend(jieba.cut(clean_text))

        words = [w for w in words if len(w) > 1 and w not in self._STOPWORDS]
        return Counter(words).most_common(limit)

    def get_high_engagement_posts(self, limit=10):
        """Return the ``limit`` posts with the highest combined engagement."""
        # NOTE(review): arithmetic on JSON sub-fields may require an explicit
        # numeric cast on some database backends — confirm on the deployed DB.
        return self.base_queryset.annotate(
            total_engagement=F('data__post__engagement__reposts') +
                           F('data__post__engagement__comments') +
                           F('data__post__engagement__likes')
        ).order_by('-total_engagement')[:limit]

    def export_to_csv(self, filepath):
        """Flatten every row into a CSV file.

        Args:
            filepath: destination path for the CSV file.

        Returns:
            The ``filepath`` that was written.
        """
        rows = []
        for post in self.base_queryset:
            # Use .get() with defaults throughout: previously one malformed
            # row aborted the whole export with a KeyError.
            record = post.data or {}
            post_data = record.get('post') or {}
            user_data = record.get('user') or {}
            engagement = post_data.get('engagement') or {}
            media = post_data.get('media') or {}

            rows.append({
                'post_id': post_data.get('id'),
                'content': post_data.get('content'),
                'created_at': post_data.get('created_at'),
                'reposts': engagement.get('reposts', 0),
                'comments': engagement.get('comments', 0),
                'likes': engagement.get('likes', 0),
                'user_id': user_data.get('id'),
                'user_name': user_data.get('name'),
                'user_followers': user_data.get('followers_count'),
                'keyword': (post.metadata or {}).get('keyword'),
                'topics': ','.join(media.get('topics') or []),
                'has_pictures': len(media.get('pictures') or []) > 0
            })

        # utf-8-sig writes a BOM so Excel auto-detects the encoding
        # for Chinese text.
        df = pd.DataFrame(rows)
        df.to_csv(filepath, index=False, encoding='utf-8-sig')
        return filepath