import json
import logging
from datetime import datetime, timedelta

from django.db.models import Avg, Case, CharField, Count, Max, Min, Q, Value, When
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response

from logwisdomhub.response import ApiResponse, ErrorCode
from .models import ErrorLog
from .permissions import HasProjectAPIKey
from .serializers import ErrorLogCreateSerializer, ErrorLogSerializer

logger = logging.getLogger(__name__)

@method_decorator(csrf_exempt, name='dispatch')
class ErrorLogViewSet(viewsets.ModelViewSet):
    """Error-log viewset: ingestion, listing, and analytics endpoints.

    CSRF is exempted because the create endpoint is called by external
    SDK clients authenticated with a project API key (see
    ``get_permissions``), not by session-cookie browsers.
    """
    queryset = ErrorLog.objects.all()
    serializer_class = ErrorLogSerializer
    # List-endpoint backends: exact filters, ?search=, and ?ordering=.
    filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
    filterset_fields = ['project', 'level']
    search_fields = ['message', 'stack_trace']
    ordering_fields = ['timestamp', 'level']
    ordering = ['-timestamp']  # newest first by default
    def get_permissions(self):
        """Instantiate the permission set for the current action.

        Log ingestion (``create``) is authenticated solely by project
        API key; every other action requires a logged-in user.
        """
        permission_cls = (
            HasProjectAPIKey if self.action == 'create' else IsAuthenticated
        )
        return [permission_cls()]
    
    def get_serializer_class(self):
        """Use the lightweight ingestion serializer for ``create``, the full one elsewhere."""
        if self.action != 'create':
            return ErrorLogSerializer
        return ErrorLogCreateSerializer
    
    def get_queryset(self):
        """Return the base queryset, optionally narrowed to a time window.

        Reads optional ISO-8601 ``start_time``/``end_time`` query params;
        malformed values are silently ignored (best-effort filtering,
        matching the previous behavior).
        """
        queryset = super().get_queryset()

        start_dt = self._parse_timestamp(self.request.query_params.get('start_time'))
        if start_dt is not None:
            queryset = queryset.filter(timestamp__gte=start_dt)

        end_dt = self._parse_timestamp(self.request.query_params.get('end_time'))
        if end_dt is not None:
            queryset = queryset.filter(timestamp__lte=end_dt)

        return queryset

    @staticmethod
    def _parse_timestamp(value):
        """Parse an ISO-8601 string (trailing 'Z' allowed) into an aware datetime.

        Returns ``None`` for missing or malformed input.  Naive results are
        interpreted in the current timezone so that comparisons against the
        timezone-aware ``timestamp`` field do not trigger USE_TZ warnings.
        """
        if not value:
            return None
        try:
            parsed = datetime.fromisoformat(value.replace('Z', '+00:00'))
        except ValueError:
            return None
        if timezone.is_naive(parsed):
            parsed = timezone.make_aware(parsed)
        return parsed
    
    def list(self, request, *args, **kwargs):
        """List error logs, optionally collapsed to one row per fingerprint.

        With ``?group_by=fingerprint`` the queryset is aggregated per
        fingerprint and the latest occurrence of each group is returned.
        Responses follow the project's ApiResponse envelope.
        """
        try:
            queryset = self.filter_queryset(self.get_queryset())

            if request.query_params.get('group_by') == 'fingerprint':
                # Aggregate per fingerprint; Min/Max come from
                # django.db.models (previously missing from the imports,
                # which made this branch raise NameError at runtime).
                groups = queryset.values('fingerprint').annotate(
                    count=Count('id'),
                    firstSeen=Min('timestamp'),
                    lastSeen=Max('timestamp'),
                    latest_id=Max('id'),
                ).order_by('-lastSeen')

                latest_ids = [item['latest_id'] for item in groups]
                # id__in does not preserve ordering — restore newest-first.
                # Reassigning `queryset` lets the shared pagination path
                # below serve both the grouped and ungrouped cases (the old
                # fall-through serialized values() dicts with the model
                # serializer when pagination was disabled).
                queryset = ErrorLog.objects.filter(
                    id__in=latest_ids
                ).order_by('-timestamp')

            page = self.paginate_queryset(queryset)
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                paginated_data = self.get_paginated_response(serializer.data).data
                return ApiResponse.paginated_success(paginated_data, "获取错误日志列表成功")

            # Pagination disabled: return the whole result set.
            serializer = self.get_serializer(queryset, many=True)
            return ApiResponse.success({
                'results': serializer.data,
                'count': len(serializer.data)
            }, "获取错误日志列表成功")

        except Exception as e:
            logger.error(f"获取错误日志列表失败: {str(e)}")
            return ApiResponse.error(
                f"获取错误日志列表失败: {str(e)}", 
                ErrorCode.DATABASE_ERROR,
                http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )
    
    @action(detail=False, methods=['get'])
    def stats(self, request):
        """Return aggregate stats: totals by level, 24h trend, top errors, browsers."""
        project_id = request.query_params.get('project')
        queryset = self.get_queryset()

        if project_id:
            queryset = queryset.filter(project_id=project_id)

        # Counts per severity level.
        by_level = queryset.values('level').annotate(
            count=Count('id')
        ).order_by('level')

        # Hourly counts over the trailing 24 hours (one query per bucket).
        now = timezone.now()
        last_24h = now - timedelta(hours=24)
        recent_trend = []

        for i in range(24):
            hour_start = last_24h + timedelta(hours=i)
            hour_end = hour_start + timedelta(hours=1)
            count = queryset.filter(
                timestamp__gte=hour_start,
                timestamp__lt=hour_end
            ).count()
            recent_trend.append({
                'time': hour_start.strftime('%H:00'),
                'count': count
            })

        # Ten most frequent error messages.
        top_errors = queryset.values('message').annotate(
            count=Count('id')
        ).order_by('-count')[:10]

        # Coarse browser classification from the user agent.  Chrome must be
        # matched before Safari because Chrome UAs also contain "Safari".
        # Uses an ORM Case/When annotation instead of the deprecated
        # QuerySet.extra() raw-SQL select.
        browser_stats = queryset.exclude(
            user_agent__isnull=True
        ).annotate(
            browser=Case(
                When(user_agent__contains='Chrome', then=Value('Chrome')),
                When(user_agent__contains='Firefox', then=Value('Firefox')),
                When(user_agent__contains='Safari', then=Value('Safari')),
                default=Value('Other'),
                output_field=CharField(),
            )
        ).values('browser').annotate(
            count=Count('id')
        ).order_by('-count')

        return Response({
            'total': queryset.count(),
            'by_level': list(by_level),
            'recent_trend': recent_trend,
            'top_errors': list(top_errors),
            'browser_stats': list(browser_stats)
        })
    
    @action(detail=False, methods=['get'])
    def dashboard(self, request):
        """Return the aggregate payload backing the dashboard page.

        Optional ``?project=<id>`` narrows everything to one project.
        Wrapped in the project's ApiResponse envelope.
        """
        project_id = request.query_params.get('project')
        queryset = self.get_queryset()

        if project_id:
            queryset = queryset.filter(project_id=project_id)

        now = timezone.now()
        last_24h = now - timedelta(hours=24)
        last_week = now - timedelta(days=7)

        # Headline counts.
        total_errors = queryset.count()
        errors_24h = queryset.filter(timestamp__gte=last_24h).count()

        # Placeholder rate: assumes a baseline of 1000 requests until real
        # request-volume data is wired in.
        error_rate = min((errors_24h / max(1000, errors_24h)) * 100, 100)

        # Distinct client IPs in the last 24h approximate affected users.
        affected_users = queryset.filter(
            timestamp__gte=last_24h
        ).values('ip_address').distinct().count()

        # Mock value until performance monitoring supplies a real figure.
        avg_response_time = 1.2

        # Hourly error counts for the trailing six hours.
        error_trend = []
        for i in range(6):
            hour_start = now - timedelta(hours=5 - i)
            hour_end = hour_start + timedelta(hours=1)
            count = queryset.filter(
                timestamp__gte=hour_start,
                timestamp__lt=hour_end
            ).count()
            error_trend.append({
                'time': hour_start.strftime('%H:00'),
                'count': count
            })

        # Top three messages plus a naive up/down/stable trend comparing the
        # last 24h against the rest of the trailing week.
        top_errors = []
        error_messages = queryset.values('message').annotate(
            count=Count('id')
        ).order_by('-count')[:3]

        for error in error_messages:
            recent_count = queryset.filter(
                message=error['message'],
                timestamp__gte=last_24h
            ).count()
            older_count = queryset.filter(
                message=error['message'],
                timestamp__gte=last_week,
                timestamp__lt=last_24h
            ).count()

            if recent_count > older_count:
                trend = 'up'
            elif recent_count < older_count:
                trend = 'down'
            else:
                trend = 'stable'

            top_errors.append({
                'message': error['message'],
                'count': error['count'],
                'trend': trend
            })

        # Browser share among rows that carry a user agent.  Chrome must be
        # matched before Safari because Chrome UAs also contain "Safari".
        # Uses an ORM Case/When annotation instead of the deprecated
        # QuerySet.extra() raw-SQL select.
        browser_stats = []
        total_with_ua = queryset.exclude(user_agent__isnull=True).count()

        if total_with_ua > 0:
            browsers = queryset.exclude(
                user_agent__isnull=True
            ).annotate(
                browser=Case(
                    When(user_agent__contains='Chrome', then=Value('Chrome')),
                    When(user_agent__contains='Firefox', then=Value('Firefox')),
                    When(user_agent__contains='Safari', then=Value('Safari')),
                    default=Value('Other'),
                    output_field=CharField(),
                )
            ).values('browser').annotate(
                count=Count('id')
            ).order_by('-count')

            for browser in browsers:
                percentage = (browser['count'] / total_with_ua) * 100
                browser_stats.append({
                    'name': browser['browser'],
                    'count': browser['count'],
                    'percentage': round(percentage, 1)
                })

        # Per-level counts with every known level present (default 0).
        errors_by_level = {'error': 0, 'warning': 0, 'info': 0, 'debug': 0}
        level_stats = queryset.values('level').annotate(count=Count('id'))
        for stat in level_stats:
            errors_by_level[stat['level']] = stat['count']

        dashboard_data = {
            'totalErrors': total_errors,
            'errorRate': round(error_rate, 2),
            'affectedUsers': affected_users,
            'avgResponseTime': avg_response_time,
            'errorTrend': error_trend,
            'topErrors': top_errors,
            'browserStats': browser_stats,
            'errorsByLevel': errors_by_level
        }

        return ApiResponse.success(data=dashboard_data, message="获取仪表板数据成功")
    
    @action(detail=False, methods=['get'])
    def trend(self, request):
        """Return a time series of error counts for the requested window.

        ``?time_range=`` selects the window (1h/24h/7d, default 24h,
        anything else falls back to 30 days); ``?project=`` narrows to
        one project.
        """
        project_id = request.query_params.get('project')
        time_range = request.query_params.get('time_range', '24h')
        metric = request.query_params.get('metric', 'errorCount')

        queryset = self.get_queryset()
        if project_id:
            queryset = queryset.filter(project_id=project_id)

        now = timezone.now()

        # (window length, bucket width, bucket count) per supported range.
        windows = {
            '1h': (timedelta(hours=1), timedelta(minutes=5), 12),
            '24h': (timedelta(hours=24), timedelta(hours=1), 24),
            '7d': (timedelta(days=7), timedelta(hours=6), 28),
        }
        span, interval, points = windows.get(
            time_range, (timedelta(days=30), timedelta(days=1), 30)
        )
        start_time = now - span

        trend_data = []
        for idx in range(points):
            bucket_start = start_time + interval * idx
            bucket_end = bucket_start + interval

            count = queryset.filter(
                timestamp__gte=bucket_start,
                timestamp__lt=bucket_end
            ).count()

            # Simplified placeholder rate until real request volume exists.
            error_rate = min((count / max(100, count)) * 100, 100)

            trend_data.append({
                'time': bucket_start.isoformat(),
                'errorCount': count,
                'errorRate': round(error_rate, 2),
                'type': 'errors'
            })

        return Response(trend_data)
    
    @action(detail=False, methods=['get'])
    def realtime(self, request):
        """Return up to 20 errors logged within the last five minutes."""
        queryset = self.get_queryset()

        project_id = request.query_params.get('project')
        if project_id:
            queryset = queryset.filter(project_id=project_id)

        # Five-minute live window, newest first, capped at 20 rows.
        cutoff = timezone.now() - timedelta(minutes=5)
        latest = queryset.filter(timestamp__gte=cutoff).order_by('-timestamp')[:20]

        return Response(self.get_serializer(latest, many=True).data)
    
    @action(detail=False, methods=['post'])
    def batch_delete(self, request):
        """Delete the error logs whose primary keys appear in ``ids``.

        Expects a JSON body like ``{"ids": [1, 2, 3]}`` and reports the
        number of rows removed (cascaded deletions included).
        """
        ids = request.data.get('ids', [])
        # Reject missing, empty, or non-list payloads (a bare string or
        # number previously slipped past the emptiness check).
        if not isinstance(ids, list) or not ids:
            return Response(
                {'message': '请提供要删除的ID列表'},
                status=status.HTTP_400_BAD_REQUEST
            )

        # delete() returns (total_rows_deleted, per-model breakdown).
        deleted_count = ErrorLog.objects.filter(id__in=ids).delete()[0]
        return Response({
            'message': f'成功删除 {deleted_count} 条记录'
        })
    
    def create(self, request, *args, **kwargs):
        """Ingest one error log entry (authenticated by project API key).

        Returns an ApiResponse envelope: 201 on success, 400 on
        validation failure, 500 on unexpected errors.
        """
        try:
            logger.debug(f"收到错误日志创建请求: {request.data}")

            # Diagnostic output only — previously logged at ERROR level,
            # which polluted error monitoring.
            logger.debug(f"[调试] 传入的URL: '{request.data.get('url')}'")

            serializer = self.get_serializer(data=request.data)
            if not serializer.is_valid():
                logger.error(f"数据验证失败: {serializer.errors}")
                return ApiResponse.error(
                    '数据验证失败',
                    ErrorCode.VALIDATION_ERROR,
                    serializer.errors,
                    status.HTTP_400_BAD_REQUEST
                )

            # Attach the project resolved by HasProjectAPIKey authentication.
            if hasattr(request, 'project'):
                serializer.validated_data['project'] = request.project
                logger.debug(f"设置项目: {request.project.name}")

            self.perform_create(serializer)
            created_id = getattr(serializer.instance, 'id', 'unknown')
            logger.info(f"错误日志创建成功: {created_id}")

            return ApiResponse.success(
                serializer.data,
                '错误日志创建成功',
                ErrorCode.SUCCESS,
                status.HTTP_201_CREATED
            )

        except Exception as e:
            # logger.exception also records the traceback for diagnosis.
            logger.exception(f"创建错误日志失败: {str(e)}")
            return ApiResponse.error(
                f"创建错误日志失败: {str(e)}", 
                ErrorCode.DATABASE_ERROR,
                http_status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )