import logging
from django.shortcuts import render
from rest_framework import viewsets, status, generics
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.utils import timezone
from .models import AnalysisTask, DataSource, AnalysisResult, Report
from .serializers import (
    AnalysisTaskSerializer, 
    DataSourceSerializer,
    AnalysisResultSerializer,
    TaskCreateSerializer,
    TaskUpdateSerializer,
    ReportSerializer
)
from data_crawler.models import CrawledData
from django.db.models import Count, Avg, Sum, F
from django.db.models.functions import TruncDate
from datetime import datetime, timedelta
from collections import Counter
from django.db.models.functions import Coalesce, Cast
from django.db import models
from django.db.models import Count, Avg, Sum, F, Q, IntegerField
from django.db.models.functions import TruncDate, Coalesce
from datetime import datetime, timedelta
from collections import Counter
import pandas as pd
import numpy as np
from .tasks import generate_report_task
from rest_framework.exceptions import ValidationError
from django.http import HttpResponse

logger = logging.getLogger(__name__)

# Create your views here.

class DataSourceViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for data sources owned by the requesting user."""
    serializer_class = DataSourceSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        # Each user only ever sees their own data sources.
        owner = self.request.user
        return DataSource.objects.filter(user=owner)

    def perform_create(self, serializer):
        # Stamp the requesting user as the owner on creation.
        serializer.save(user=self.request.user)

    @action(detail=True, methods=['post'])
    def test_connection(self, request, pk=None):
        """Probe connectivity for one data source (logic not implemented yet)."""
        self.get_object()  # enforces existence + object-level permissions
        # TODO: real connection-test logic goes here.
        return Response({'status': 'success', 'message': '连接测试成功'})

class AnalysisTaskViewSet(viewsets.ModelViewSet):
    """CRUD plus start/cancel lifecycle endpoints for analysis tasks."""
    permission_classes = [IsAuthenticated]

    def get_serializer_class(self):
        """Pick a serializer per DRF action.

        Bug fix: PATCH requests dispatch as ``partial_update``, not
        ``update``, so they previously fell through to the read-oriented
        AnalysisTaskSerializer; both update flavors now share
        TaskUpdateSerializer.
        """
        if self.action == 'create':
            return TaskCreateSerializer
        if self.action in ('update', 'partial_update'):
            return TaskUpdateSerializer
        return AnalysisTaskSerializer

    def get_queryset(self):
        # Users only see their own tasks.
        return AnalysisTask.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        # Stamp the requesting user as the owner on creation.
        serializer.save(user=self.request.user)

    @action(detail=True, methods=['post'])
    def start(self, request, pk=None):
        """Move a PENDING task to RUNNING; reject any other state."""
        task = self.get_object()
        if task.status == AnalysisTask.TaskStatus.PENDING:
            task.status = AnalysisTask.TaskStatus.RUNNING
            task.save()
            # TODO: trigger the async analysis job here
            return Response({'status': 'success', 'message': '任务已启动'})
        return Response(
            {'status': 'error', 'message': '任务无法启动'},
            status=status.HTTP_400_BAD_REQUEST
        )

    @action(detail=True, methods=['post'])
    def cancel(self, request, pk=None):
        """Cancel a PENDING or RUNNING task.

        NOTE(review): no CANCELLED state is visible in TaskStatus here, so
        cancellation is recorded as FAILED with an explanatory message —
        confirm this mapping is intentional.
        """
        task = self.get_object()
        if task.status in [AnalysisTask.TaskStatus.PENDING, AnalysisTask.TaskStatus.RUNNING]:
            task.status = AnalysisTask.TaskStatus.FAILED
            task.error_message = '任务被用户取消'
            task.save()
            return Response({'status': 'success', 'message': '任务已取消'})
        return Response(
            {'status': 'error', 'message': '任务无法取消'},
            status=status.HTTP_400_BAD_REQUEST
        )

class AnalysisResultViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints for analysis results of the current user's tasks."""
    serializer_class = AnalysisResultSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        # Results are only reachable through tasks the requesting user owns.
        owner = self.request.user
        return AnalysisResult.objects.filter(task__user=owner)

class DashboardView(generics.GenericAPIView):
    """Aggregate dashboard metrics for the requesting user.

    Compares the last 30 days against the 30 days before that and
    returns totals plus period-over-period growth percentages.
    """
    permission_classes = [IsAuthenticated]

    @staticmethod
    def _summarize(records):
        """Summarize a CrawledData iterable in a single pass.

        Returns the record count, summed ``engagement_count`` and the
        number of distinct truthy ``user_id`` values found in each
        record's ``metadata`` JSON. (The original iterated each
        queryset three times to compute the same numbers.)
        """
        total = 0
        engagement = 0
        user_ids = set()
        for record in records:
            total += 1
            meta = record.metadata or {}  # guard against NULL metadata
            engagement += meta.get('engagement_count', 0)
            uid = meta.get('user_id')
            if uid:
                user_ids.add(uid)
        return {
            'total_data': total,
            'total_engagement': engagement,
            'active_users': len(user_ids),
        }

    def get(self, request):
        try:
            # Current 30-day window and the 30 days immediately before it.
            end_date = timezone.now()
            start_date = end_date - timedelta(days=30)
            previous_start = start_date - timedelta(days=30)

            current_stats = self._summarize(CrawledData.objects.filter(
                task__user=request.user,
                created_at__range=(start_date, end_date)
            ))
            previous_stats = self._summarize(CrawledData.objects.filter(
                task__user=request.user,
                created_at__range=(previous_start, start_date)
            ))

            # Period-over-period growth percentages.
            data_growth = calculate_growth(
                current_stats['total_data'],
                previous_stats['total_data']
            )
            engagement_growth = calculate_growth(
                current_stats['total_engagement'],
                previous_stats['total_engagement']
            )
            user_growth = calculate_growth(
                current_stats['active_users'],
                previous_stats['active_users']
            )

            # Average engagement per record; guard against empty periods.
            current_engagement_rate = (
                current_stats['total_engagement'] / current_stats['total_data']
                if current_stats['total_data'] > 0 else 0
            )
            previous_engagement_rate = (
                previous_stats['total_engagement'] / previous_stats['total_data']
                if previous_stats['total_data'] > 0 else 0
            )
            engagement_rate_growth = calculate_growth(
                current_engagement_rate,
                previous_engagement_rate
            )

            return Response({
                'total_data': current_stats['total_data'],
                'total_engagement': current_stats['total_engagement'],
                'active_users': current_stats['active_users'],
                'engagement_rate': round(current_engagement_rate, 4),
                'data_growth': round(data_growth, 2),
                'engagement_growth': round(engagement_growth, 2),
                'user_growth': round(user_growth, 2),
                'engagement_rate_growth': round(engagement_rate_growth, 2)
            })

        except Exception as e:
            # Bug fix: was print() — use the module logger with traceback.
            logger.exception("Error in dashboard view")
            return Response(
                {
                    'error': str(e),
                    'total_data': 0,
                    'total_engagement': 0,
                    'active_users': 0,
                    'engagement_rate': 0,
                    'data_growth': 0,
                    'engagement_growth': 0,
                    'user_growth': 0,
                    'engagement_rate_growth': 0
                },
                # Deliberately 200 with defaults so the frontend still renders.
                status=status.HTTP_200_OK
            )

class WeiboVisualizationView(generics.GenericAPIView):
    """Visualization API for Weibo data: trends, keywords, engagement,
    topics and user activity, scoped to the requesting user.

    Query params:
        task_id: optional crawl-task filter.
        days: look-back window for time-based charts (default 7).
    """
    permission_classes = [IsAuthenticated]

    def get(self, request):
        try:
            task_id = request.query_params.get('task_id')
            days = int(request.query_params.get('days', 7))

            # Base queryset: this user's Weibo records only.
            queryset = CrawledData.objects.filter(
                task__user=request.user,
                metadata__platform='weibo'
            )
            if task_id:
                queryset = queryset.filter(task_id=task_id)

            # Time window for the daily trend and user-activity charts.
            end_date = timezone.now()
            start_date = end_date - timedelta(days=days)

            # 1. Daily posting trend with summed engagement counters.
            daily_posts = queryset.filter(
                created_at__gte=start_date
            ).annotate(
                date=TruncDate('created_at')
            ).values('date').annotate(
                post_count=Count('id'),
                total_reposts=Cast(Coalesce(Sum('data__post__engagement__reposts'), 0), output_field=IntegerField()),
                total_comments=Cast(Coalesce(Sum('data__post__engagement__comments'), 0), output_field=IntegerField()),
                total_likes=Cast(Coalesce(Sum('data__post__engagement__likes'), 0), output_field=IntegerField())
            ).order_by('date')

            # 2. Top-10 keyword distribution.
            keyword_stats = queryset.values(
                'metadata__keyword'
            ).annotate(
                post_count=Count('id')
            ).order_by('-post_count')[:10]

            # 3. Engagement totals and averages across the whole queryset.
            engagement_stats = queryset.aggregate(
                total_reposts=Cast(Coalesce(Sum('data__post__engagement__reposts'), 0), output_field=IntegerField()),
                total_comments=Cast(Coalesce(Sum('data__post__engagement__comments'), 0), output_field=IntegerField()),
                total_likes=Cast(Coalesce(Sum('data__post__engagement__likes'), 0), output_field=IntegerField()),
                avg_reposts=Cast(Coalesce(Avg('data__post__engagement__reposts'), 0), output_field=IntegerField()),
                avg_comments=Cast(Coalesce(Avg('data__post__engagement__comments'), 0), output_field=IntegerField()),
                avg_likes=Cast(Coalesce(Avg('data__post__engagement__likes'), 0), output_field=IntegerField())
            )

            # 4. Top-10 hot topics, counted in Python from the JSON payloads.
            topics = []
            for post in queryset.values('data__post__media__topics'):
                if post.get('data__post__media__topics'):
                    topics.extend(post['data__post__media__topics'])

            topic_counts = Counter(topics)
            top_topics = topic_counts.most_common(10)

            # 5. Top-10 users ranked by total engagement within the window.
            user_stats = queryset.filter(
                created_at__gte=start_date
            ).values(
                'data__user__id',
                'data__user__name'
            ).annotate(
                post_count=Count('id'),
                total_engagement=Cast(
                    Coalesce(Sum('data__post__engagement__reposts'), 0) +
                    Coalesce(Sum('data__post__engagement__comments'), 0) +
                    Coalesce(Sum('data__post__engagement__likes'), 0),
                    output_field=IntegerField()
                )
            ).order_by('-total_engagement')[:10]

            user_activity = {
                'users': [stat['data__user__name'] for stat in user_stats],
                'posts': [stat['post_count'] for stat in user_stats],
                'engagement': [stat['total_engagement'] for stat in user_stats]
            }

            return Response({
                'daily_trend': {
                    'dates': [str(item['date']) for item in daily_posts],
                    'posts': [item['post_count'] for item in daily_posts],
                    'reposts': [item['total_reposts'] for item in daily_posts],
                    'comments': [item['total_comments'] for item in daily_posts],
                    'likes': [item['total_likes'] for item in daily_posts]
                },
                'keyword_distribution': {
                    'keywords': [item['metadata__keyword'] for item in keyword_stats if item['metadata__keyword']],
                    'counts': [item['post_count'] for item in keyword_stats if item['metadata__keyword']]
                },
                'engagement_stats': {
                    'totals': {
                        'reposts': engagement_stats['total_reposts'],
                        'comments': engagement_stats['total_comments'],
                        'likes': engagement_stats['total_likes']
                    },
                    'averages': {
                        'reposts': round(engagement_stats['avg_reposts'], 2),
                        'comments': round(engagement_stats['avg_comments'], 2),
                        'likes': round(engagement_stats['avg_likes'], 2)
                    }
                },
                'top_topics': {
                    'topics': [topic for topic, _ in top_topics],
                    'counts': [count for _, count in top_topics]
                },
                'user_activity': user_activity
            })

        except Exception as e:
            # Bug fix: was print() + traceback.print_exc(); logger.exception
            # records the same traceback through the configured logging stack.
            logger.exception("Error in WeiboVisualizationView")
            return Response(
                {
                    'error': str(e),
                    'daily_trend': {'dates': [], 'posts': [], 'reposts': [], 'comments': [], 'likes': []},
                    'keyword_distribution': {'keywords': [], 'counts': []},
                    'engagement_stats': {
                        'totals': {'reposts': 0, 'comments': 0, 'likes': 0},
                        'averages': {'reposts': 0, 'comments': 0, 'likes': 0}
                    },
                    'top_topics': {'topics': [], 'counts': []},
                    'user_activity': {'users': [], 'posts': [], 'engagement': []}
                },
                # 200 with empty defaults so the dashboard degrades gracefully.
                status=status.HTTP_200_OK
            )

class TrendView(generics.GenericAPIView):
    """Daily record counts and engagement over a selectable time range.

    Query params:
        source_id: optional data-source filter.
        timeRange: 'week' (default), 'month' or 'year'.
    """
    permission_classes = [IsAuthenticated]

    def get(self, request):
        try:
            source_id = request.query_params.get('source_id')
            time_range = request.query_params.get('timeRange', 'week')

            # Anchor the window on the newest record rather than "now" so
            # purely historical datasets still produce a populated chart.
            latest_record = CrawledData.objects.filter(
                task__user=request.user
            ).order_by('-created_at').first()

            if not latest_record:
                return Response({
                    'dates': [],
                    'data': [],
                    'engagement': [],
                    'growth': 0
                })

            end_dt = latest_record.created_at
            range_days = {'week': 7, 'month': 30, 'year': 365}
            start_dt = end_dt - timedelta(days=range_days.get(time_range, 7))

            logger.debug("Query time range: %s to %s", start_dt, end_dt)

            queryset = CrawledData.objects.filter(task__user=request.user)
            if source_id:
                queryset = queryset.filter(task__data_source_id=source_id)

            data_points = queryset.filter(
                created_at__range=(start_dt, end_dt)
            ).values(
                'created_at',
                'metadata__engagement_count'
            ).order_by('created_at')

            # Bucket records by calendar day.
            daily_data = {}
            for point in data_points:
                day = point['created_at'].date()
                bucket = daily_data.setdefault(day, {'count': 0, 'engagement': 0})
                bucket['count'] += 1
                bucket['engagement'] += point['metadata__engagement_count'] or 0

            # Emit one entry per day across the window, zero-filling gaps.
            dates = []
            values = []
            engagement = []
            current_day = start_dt.date()
            last_day = end_dt.date()
            while current_day <= last_day:
                dates.append(current_day.strftime('%Y-%m-%d'))
                bucket = daily_data.get(current_day)
                values.append(bucket['count'] if bucket else 0)
                engagement.append(bucket['engagement'] if bucket else 0)
                current_day += timedelta(days=1)

            # Week-over-week growth: last 7 days vs the 7 before them.
            total_current = sum(values[-7:]) if values else 0
            total_previous = sum(values[-14:-7]) if len(values) >= 14 else 0
            growth = calculate_growth(total_current, total_previous)

            return Response({
                'dates': dates,
                'data': values,
                'engagement': engagement,
                'growth': round(growth, 2)
            })

        except Exception as e:
            # Bug fix: debug print()s (two of which issued extra COUNT
            # queries) and traceback.print_exc() replaced with the logger.
            logger.exception("Error in trend view")
            return Response(
                {
                    'dates': [],
                    'data': [],
                    'engagement': [],
                    'growth': 0
                },
                status=status.HTTP_200_OK
            )

class DistributionView(generics.GenericAPIView):
    """Distribution of crawled-record counts per data source."""
    permission_classes = [IsAuthenticated]

    def get(self, request):
        try:
            # Count records grouped by the owning data source, largest first.
            counts = (
                CrawledData.objects
                .filter(task__user=request.user)
                .values('task__data_source__name')
                .annotate(value=Count('id'))
                .order_by('-value')
            )

            payload = [
                {'name': row['task__data_source__name'], 'value': row['value']}
                for row in counts
            ]

            return Response({'data': payload})

        except Exception as e:
            return Response(
                {'error': str(e)},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

class TopicAnalysisView(generics.GenericAPIView):
    """Top-10 topics counted from crawled posts' media metadata.

    Query params:
        source_id: optional data-source filter.
        start_date / end_date: optional post-date range (both required).
    """
    permission_classes = [IsAuthenticated]

    def get(self, request):
        try:
            source_id = request.query_params.get('source_id')
            start_date = request.query_params.get('start_date')
            end_date = request.query_params.get('end_date')

            # Bug fix: previously CrawledData.objects.all(), which exposed
            # other users' data; scope to the requesting user like every
            # other analytics view in this module.
            queryset = CrawledData.objects.filter(task__user=request.user)
            if source_id:
                queryset = queryset.filter(task__data_source_id=source_id)
            if start_date and end_date:
                queryset = queryset.filter(
                    data__post__created_at__range=(start_date, end_date)
                )

            # Count topic occurrences across all posts.
            topic_counts = Counter()
            for record in queryset:
                try:
                    topic_counts.update(
                        record.data.get('post', {}).get('media', {}).get('topics', [])
                    )
                except Exception as e:
                    # A malformed record shouldn't abort the whole analysis.
                    logger.warning(
                        "Error processing topics for data %s: %s", record.id, e)
                    continue

            data = [
                {'name': topic, 'value': count}
                for topic, count in topic_counts.most_common(10)
            ]

            return Response({'data': data})

        except Exception as e:
            # Bug fix: was print(); use the module logger with traceback.
            logger.exception("Error in topic analysis view")
            return Response(
                {'error': str(e), 'data': []},
                # 200 with empty data so the chart renders empty, not broken.
                status=status.HTTP_200_OK
            )

class SentimentAnalysisView(generics.GenericAPIView):
    """Sentiment analysis endpoint (currently returns fixed mock counts)."""
    permission_classes = [IsAuthenticated]

    def get(self, request):
        try:
            # Hard-coded placeholder results; real analysis not wired up yet.
            sentiments = [
                ('正面', 150),
                ('中性', 300),
                ('负面', 50),
            ]
            payload = [
                {'name': label, 'value': count}
                for label, count in sentiments
            ]

            return Response({'data': payload})

        except Exception as e:
            return Response(
                {'error': str(e)},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

class UserRankingView(generics.GenericAPIView):
    """Top-10 users ranked by an influence score derived from their
    post count, total engagement and follower count.

    Query params:
        source_id: optional data-source filter.
        start_date / end_date: optional task-creation date range.
    """
    permission_classes = [IsAuthenticated]

    def get(self, request):
        try:
            source_id = request.query_params.get('source_id')
            start_date = request.query_params.get('start_date')
            end_date = request.query_params.get('end_date')

            queryset = CrawledData.objects.filter(task__user=request.user)
            if source_id:
                queryset = queryset.filter(task__data_source_id=source_id)
            if start_date and end_date:
                queryset = queryset.filter(
                    task__created_at__range=(start_date, end_date)
                )

            # Aggregate per-user post and engagement totals.
            user_stats = {}
            for record in queryset:
                try:
                    # Read every nested field up front so a malformed record
                    # contributes nothing (no partial increments).
                    user = record.data['user']
                    eng = record.data['post']['engagement']
                    user_id = user['id']
                    name = user['name']
                    followers = user['followers_count']
                    record_engagement = (
                        eng['reposts'] + eng['comments'] + eng['likes']
                    )
                except (KeyError, TypeError) as e:
                    # Bug fix: previously one malformed record raised out of
                    # the loop and turned the whole response into a 500.
                    logger.warning(
                        "Skipping malformed record %s in user ranking: %s",
                        record.id, e)
                    continue

                stats = user_stats.setdefault(user_id, {
                    'name': name,
                    'posts': 0,
                    'engagement': 0,
                    'followers': followers
                })
                stats['posts'] += 1
                stats['engagement'] += record_engagement

            # Score each user and sort by influence, descending.
            data = []
            for user_id, stats in user_stats.items():
                influence = calculate_influence_score(
                    stats['posts'],
                    stats['engagement'],
                    stats['followers']
                )
                data.append({
                    'name': stats['name'],
                    'posts': stats['posts'],
                    'engagement': stats['engagement'],
                    'influence': influence
                })

            data.sort(key=lambda x: x['influence'], reverse=True)

            return Response({'data': data[:10]})

        except Exception as e:
            return Response(
                {'error': str(e)},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

class ReportViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for reports, plus an Excel export action."""
    serializer_class = ReportSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        # Users only see their own reports.
        return Report.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        """Create a report and kick off asynchronous generation.

        Raises:
            ValidationError: if the data source is missing / not owned by
                the requester, or anything else fails during creation.
        """
        try:
            logger.info("Creating new report")
            logger.info(f"Report data: {serializer.validated_data}")

            # The chosen data source must exist and belong to the requester.
            data_source = serializer.validated_data.get('data_source')
            if not data_source or data_source.user != self.request.user:
                raise ValidationError("Invalid data source")

            report = serializer.save(user=self.request.user)
            logger.info(f"Report created with ID: {report.id}")

            # Generate the report contents asynchronously via Celery.
            task = generate_report_task.delay(report.id)
            logger.info(f"Celery task created with ID: {task.id}")

            # Persist the Celery task id so progress can be polled later.
            report.task_id = task.id
            report.save(update_fields=['task_id'])

        except ValidationError as e:
            logger.error(f"Validation error while creating report: {str(e)}")
            raise
        except Exception as e:
            logger.error(f"Error creating report: {str(e)}")
            # Bug fix: was `serializers.ValidationError`, but no `serializers`
            # module is imported in this file — that raised a NameError
            # instead of the intended validation error. Use the
            # rest_framework.exceptions.ValidationError imported above.
            raise ValidationError(f"Error creating report: {str(e)}")

    @action(detail=True, methods=['post'])
    def export(self, request, pk=None):
        """Export a completed report as an .xlsx download."""
        report = self.get_object()
        if report.status != 'completed':
            return Response(
                {'error': '报表尚未生成完成，无法导出'},
                status=status.HTTP_400_BAD_REQUEST
            )

        try:
            df = pd.DataFrame()

            # Assemble one column group per requested metric.
            if 'data_volume' in report.metrics:
                data_volume_df = pd.DataFrame({
                    '日期': report.result['data_volume']['dates'],
                    '数据量': report.result['data_volume']['values']
                })
                df = pd.concat([df, data_volume_df], axis=1)

            if 'engagement' in report.metrics:
                engagement_df = pd.DataFrame({
                    '日期': report.result['engagement']['dates'],
                    '评论数': report.result['engagement']['comments'],
                    '点赞数': report.result['engagement']['likes'],
                    '转发数': report.result['engagement']['reposts']
                })
                df = pd.concat([df, engagement_df], axis=1)

            # Bug fix: the file is written with openpyxl as .xlsx, so declare
            # the OOXML MIME type instead of the legacy application/vnd.ms-excel.
            response = HttpResponse(
                content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
            )
            response['Content-Disposition'] = f'attachment; filename=report_{pk}.xlsx'

            # Stream the workbook straight into the HTTP response.
            df.to_excel(response, index=False, engine='openpyxl')
            return response

        except Exception as e:
            logger.error(f"Error exporting report: {str(e)}")
            return Response(
                {'error': f'导出报表时出错: {str(e)}'},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

def calculate_growth(current, previous):
    """Return the percent change from *previous* to *current*, rounded to 2 places.

    A zero baseline yields 100 when there is any current value, else 0.
    """
    if previous == 0:
        return 100 if current > 0 else 0
    delta = current - previous
    return round(delta / previous * 100, 2)

def calculate_influence_score(posts, engagement, followers):
    """Weighted user-influence score, clamped to the 0-100 range.

    Posts weigh most heavily, then engagement, then followers; a raw
    weighted score of 1000 maps to the maximum of 100.
    """
    post_weight, engagement_weight, follower_weight = 10, 0.1, 0.01
    raw = (
        posts * post_weight
        + engagement * engagement_weight
        + followers * follower_weight
    )
    # Normalize (raw / 1000 -> fraction of max) and clamp to 100.
    return min(round(raw / 1000 * 100), 100)
