import pickle
import json
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
# from django.core.cache import caches
from django_redis import get_redis_connection
from django.utils import timezone
from .models import CrawlerConfig, CrawlerCookiesAccount, CrawlerStatus, ProxyConfig
from .serializers import CrawlerConfigSerializer, CrawlerCookiesAccountSerializer, TaskListSerializer, ProxyConfigSerializer
from .tasks import start_crawler_task
from rest_framework.exceptions import ValidationError
from rest_framework import generics
from .strategies import DeleteStrategyFactory, BaseDataQueryStrategy
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
# Raw redis client for the 'cache_status' alias: shares crawler status/log
# payloads between the Celery workers and the polling endpoints below.
cache_status = get_redis_connection('cache_status')

class CrawlerConfigViewSet(viewsets.ModelViewSet):
    """
    ViewSet for viewing and editing CrawlerConfig instances.

    Besides CRUD, exposes actions to start a crawler task, poll its
    status/logs, delete a config together with its crawled data, and
    fetch a config's details by task_id.
    """
    serializer_class = CrawlerConfigSerializer
    queryset = CrawlerConfig.objects.all()
    lookup_field = 'task_id'
    permission_classes = [IsAuthenticated]

    # Maps crawler_type -> (model field checked for duplicates, human label
    # used in the error message).
    DUPLICATE_CHECK_FIELDS = {
        'search': ('keyword', '关键字'),
        'detail': ('id_list', 'ID'),
        'creator': ('creator_list', '创作者'),
    }

    def perform_create(self, serializer):
        """
        Validate and save a new CrawlerConfig for the requesting user.

        Rejects duplicates within the same (user, platform, crawler_type):
        exact match for 'search' keywords, element overlap for the
        JSON-encoded 'detail'/'creator' lists.

        Raises:
            ValidationError: unsupported crawler_type, malformed JSON list,
                or a duplicate of an existing config.
        """
        user = self.request.user
        data = serializer.validated_data
        platform_name = data.get('platform_name')
        crawler_type = data.get('crawler_type')

        if crawler_type not in self.DUPLICATE_CHECK_FIELDS:
            raise ValidationError({'message': f'不支持的爬虫类型：{crawler_type}'})

        field, field_name = self.DUPLICATE_CHECK_FIELDS[crawler_type]
        value = data.get(field)

        if value:
            if field in ('id_list', 'creator_list'):
                # Lists are stored as JSON strings; a malformed payload should
                # surface as a 400 validation error, not a 500.
                try:
                    new_items = set(json.loads(value))
                except (TypeError, ValueError):
                    raise ValidationError({'message': f'检测到{field_name}列表中存在重复项！'}) from None
                existing_configs = CrawlerConfig.objects.filter(
                    user=user,
                    platform_name=platform_name,
                    crawler_type=crawler_type,
                )
                for config in existing_configs:
                    stored = getattr(config, field)
                    existing_items = set(json.loads(stored)) if stored else set()
                    if new_items & existing_items:
                        raise ValidationError({'message': f'检测到{field_name}列表中存在重复项！'})
            elif CrawlerConfig.objects.filter(
                user=user,
                platform_name=platform_name,
                crawler_type=crawler_type,
                **{field: value},
            ).exists():
                raise ValidationError({'message': f'请勿重复创建相同{field_name}的爬虫任务！'})

        serializer.save(user=user)

    def get_queryset(self):
        """Only expose CrawlerConfig rows owned by the requesting user."""
        return CrawlerConfig.objects.filter(user=self.request.user)

    @action(detail=False, methods=['get'], url_path='test-cache', url_name='test_cache')
    def test_cache(self, request):
        """Debug endpoint: echo the 'name' key from the status cache."""
        raw = cache_status.get("name")
        # redis returns None for a missing key, which has no .decode() —
        # guard instead of raising a 500.
        name = raw.decode('utf-8') if raw is not None else None
        return Response(name, status=status.HTTP_200_OK)

    @action(detail=True, methods=['post'], url_path='start-crawler', url_name='start_crawler')
    def start_crawler(self, request, task_id=None):
        """
        Start the crawler task identified by task_id via Celery.

        Validates that the user has configured platform cookies, that proxy
        credentials are complete when the proxy is enabled, and that no
        task on the same platform is already Running.
        """
        crawler_config = self.get_object()
        platform_name = crawler_config.platform_name
        user = request.user

        # The crawler needs platform cookies to authenticate.
        if not CrawlerCookiesAccount.objects.filter(user=user, platform_name=platform_name).exists():
            return Response(
                {'message': '请移步配置管理配置该平台的账号信息!!!'},
                status=status.HTTP_400_BAD_REQUEST
            )

        # Fetch the proxy config once and reuse it both for validation and
        # for the task payload (the original code queried it twice).
        proxy_info = None
        if crawler_config.is_proxy_enabled:
            proxy_config = ProxyConfig.objects.filter(user=user).first()
            if not proxy_config or not all([
                proxy_config.username,
                proxy_config.password,
                proxy_config.secret_id,
                proxy_config.secret_key
            ]):
                return Response(
                    {'message': '请移步配置管理快代理配置完善代理信息!!!'},
                    status=status.HTTP_400_BAD_REQUEST
                )
            proxy_info = {
                'username': proxy_config.username,
                'password': proxy_config.password,
                'secret_id': proxy_config.secret_id,
                'secret_key': proxy_config.secret_key
            }

        # One running task per platform per user.
        if CrawlerStatus.objects.filter(task_id__user=user, task_id__platform_name=platform_name, crawler_status='Running').exists():
            return Response(
                {'message': '同一平台不支持并发获取数据，请等待当前任务完成!!!'},
                status=status.HTTP_400_BAD_REQUEST
            )

        # Dispatch the Celery task, then record the Running state.
        # NOTE(review): a status poll landing between these two statements
        # sees no Running record — confirm the frontend tolerates that.
        start_crawler_task.delay(str(crawler_config.task_id), crawler_config.user_id, proxy_info)
        CrawlerStatus.objects.update_or_create(
            task_id=crawler_config,
            defaults={
                'crawler_status': 'Running',
                'start_time': timezone.now(),
                'crawler_status_logs': None
            }
        )
        return Response(
            {'status': 'Crawler started', 'task_id': crawler_config.task_id},
            status=status.HTTP_202_ACCEPTED
        )

    @action(detail=True, methods=['get'], url_path='status', url_name='status')
    def get_status(self, request, task_id=None):
        """
        Return the crawler's current status and logs for polling clients.

        Reads the status payload from redis under the task_id key and the
        log text under the "logger" key, mirrors both into CrawlerStatus,
        and falls back to a generic "Running" payload while redis is empty.
        """
        try:
            crawler_config = CrawlerConfig.objects.get(task_id=task_id)
        except CrawlerConfig.DoesNotExist:
            return Response({'error': 'Task not found'}, status=status.HTTP_404_NOT_FOUND)

        serialized_status = cache_status.get(task_id)
        # NOTE(review): "logger" is a single shared key, so concurrently
        # running tasks would overwrite each other's logs — confirm tasks
        # never run in parallel (start_crawler blocks per-platform only).
        serialized_log = cache_status.get("logger")

        if serialized_status and serialized_log:
            # Values were JSON-serialized strings, so strip the quotes JSON
            # wraps around them.
            crawler_status = serialized_status.decode('utf-8').strip('"')
            crawler_status_logs = serialized_log.decode('utf-8').strip('"')
            crawler_status_instance, created = CrawlerStatus.objects.get_or_create(
                task_id=crawler_config,
                defaults={
                    'crawler_status': crawler_status,
                    'crawler_status_logs': crawler_status_logs,
                    'start_time': timezone.now(),
                }
            )
            if not created:
                # Refresh status/logs and stamp the latest poll time.
                crawler_status_instance.crawler_status = crawler_status
                crawler_status_instance.crawler_status_logs = crawler_status_logs
                crawler_status_instance.end_time = timezone.now()
                crawler_status_instance.save()
            data = {
                'crawler_status': crawler_status,
                'crawler_status_logs': crawler_status_logs,
            }
        else:
            data = {
                'crawler_status': "Running",
                'crawler_status_logs': "正在运行中.....",
            }

        return Response(data, status=status.HTTP_200_OK)

    @action(detail=True, methods=['delete'])
    def delete_with_data(self, request, task_id=None):
        """
        Delete the config together with its crawled content and comments.

        get_object() raises Http404 for an unknown task_id, which DRF's
        exception handler turns into a 404 response, so no manual
        not-found handling is needed (the old `except DoesNotExist` branch
        could never fire).
        """
        config = self.get_object()
        delete_strategy = DeleteStrategyFactory.create_strategy(config.platform_name)
        delete_strategy.delete_content(task_id)
        delete_strategy.delete_comments(task_id)
        config.delete()
        return Response(
            {'message': 'Crawler configuration and associated data deleted successfully.'},
            status=status.HTTP_200_OK
        )

    @action(detail=False, methods=['get'], url_path='retrieve-details', url_name='retrieve_details')
    def retrieve_details(self, request):
        """
        Return the CrawlerConfig for the task_id query parameter.

        Only configs owned by the requesting user are visible; unknown
        task_ids yield a 404 via get_object_or_404.
        """
        task_id = request.query_params.get('task_id', None)
        if not task_id:
            return Response(
                {'error': 'task_id 参数是必需的。'},
                status=status.HTTP_400_BAD_REQUEST
            )

        crawler_config = get_object_or_404(CrawlerConfig, task_id=task_id, user=request.user)
        serializer = self.get_serializer(crawler_config)
        return Response(serializer.data, status=status.HTTP_200_OK)


class CrawlerCookiesAccountViewSet(viewsets.ModelViewSet):
    """
    ViewSet for viewing and editing CrawlerCookiesAccount instances.

    create() behaves as an upsert keyed on (user, platform_name,
    account_name); both paths return HTTP 200 for frontend compatibility.
    """
    serializer_class = CrawlerCookiesAccountSerializer
    queryset = CrawlerCookiesAccount.objects.all()
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        """Only expose accounts owned by the requesting user."""
        return CrawlerCookiesAccount.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        """Attach the requesting user when creating an account."""
        serializer.save(user=self.request.user)

    def perform_update(self, serializer):
        """Keep the account bound to the requesting user on update."""
        serializer.save(user=self.request.user)

    def create(self, request, *args, **kwargs):
        """
        Create or update a CrawlerCookiesAccount (upsert).

        The caller supplies account_name, platform_name and cookies. Both
        paths run request.data through the serializer, so invalid input
        yields a 400 instead of an unvalidated model write (the previous
        get_or_create(defaults=request.data) bypassed validation and could
        crash on unexpected keys).
        """
        user = request.user
        platform_name = request.data.get('platform_name')
        account_name = request.data.get('account_name')

        account = CrawlerCookiesAccount.objects.filter(
            user=user,
            platform_name=platform_name,
            account_name=account_name
        ).first()

        if account is not None:
            # Existing record: partial update with the validated payload.
            serializer = self.get_serializer(account, data=request.data, partial=True)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)
            return Response(serializer.data, status=status.HTTP_200_OK)

        # New record: full validation, then create bound to the user.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_200_OK, headers=headers)

    def update(self, request, *args, **kwargs):
        """
        Update an existing CrawlerCookiesAccount and mark it valid again.
        """
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        # 0 == valid/active. ModelSerializer.update() saves the whole
        # instance, so this in-memory assignment is persisted by the save
        # inside perform_update.
        instance.status = 0
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)

        return Response({
            "code": 0,
            "message": "更新成功"
        }, status=status.HTTP_200_OK)

    def destroy(self, request, *args, **kwargs):
        """Delete a CrawlerCookiesAccount instance (default behavior)."""
        return super().destroy(request, *args, **kwargs)


class TaskListView(generics.ListAPIView):
    """List the requesting user's crawler tasks, optionally by platform."""

    serializer_class = TaskListSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        """Return the user's CrawlerConfig rows, narrowed by ?platform_name=."""
        filters = {'user': self.request.user}
        platform_name = self.request.query_params.get('platform_name')
        if platform_name:
            filters['platform_name'] = platform_name
        return CrawlerConfig.objects.filter(**filters)

class PlatformDataView(APIView):
    """Query crawled platform data for a task via a per-platform strategy."""

    def get(self, request, task_id):
        """
        Return the data collected for task_id on the requested platform.

        Errors are reported in the response body's 'code' field (400/500)
        with an HTTP 200 envelope, matching the frontend's convention.
        """
        platform_name = request.query_params.get('platform_name')
        if not platform_name:
            return Response({
                'code': 400,
                'message': '平台名称不能为空'
            })

        try:
            strategy = BaseDataQueryStrategy.get_strategy(platform_name)
            if not strategy:
                return Response({
                    'code': 400,
                    'message': f'不支持的平台: {platform_name}'
                })
            # Delegate the actual query to the platform strategy.
            return Response(strategy.get_platform_data(task_id))
        except Exception as e:
            # Boundary handler: surface the failure in the envelope.
            return Response({
                'code': 500,
                'message': f'获取平台数据失败: {str(e)}'
            })

class ProxyConfigViewSet(viewsets.ModelViewSet):
    """
    ViewSet for viewing and editing ProxyConfig instances.

    create() behaves as an upsert keyed on (user, provider).
    """
    serializer_class = ProxyConfigSerializer
    queryset = ProxyConfig.objects.all()
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        """Only expose proxy configs owned by the requesting user."""
        return ProxyConfig.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        """Attach the requesting user when creating a config."""
        serializer.save(user=self.request.user)

    def create(self, request, *args, **kwargs):
        """
        Create or update the proxy config for (user, provider) — an upsert.

        Both paths validate request.data through the serializer instead of
        writing raw request data as model defaults, and the update path
        sets status=0 (active) in the same save instead of a redundant
        second save().
        """
        user = request.user
        provider = request.data.get('provider', 'kuaidaili')

        proxy_config = ProxyConfig.objects.filter(user=user, provider=provider).first()

        if proxy_config is not None:
            # Existing config: validated partial update, reactivated in one write.
            serializer = self.get_serializer(proxy_config, data=request.data, partial=True)
            serializer.is_valid(raise_exception=True)
            serializer.save(status=0)  # 0 == active ("生效中")
        else:
            # New config: full validation, then create bound to the user.
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            self.perform_create(serializer)

        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_200_OK, headers=headers)