from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema, OpenApiParameter
from rest_framework import serializers
from utils.logger import logger

from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView

from news.crawlers.gxbh_crawler import GXBHCrawler


class CrawlNewsSuccessResponseSerializer(serializers.Serializer):
    """Schema for the 200 response of the crawl endpoint (documentation-only;
    used by drf-spectacular, not for input validation)."""

    # Always True on the success path; `default` only affects the schema example.
    success = serializers.BooleanField(default=True)
    # Number of articles the crawler reported as saved.
    count = serializers.IntegerField()


class CrawlNewsErrorResponseSerializer(serializers.Serializer):
    """Schema for the error response of the crawl endpoint (documentation-only;
    used by drf-spectacular, not for input validation)."""

    # Always False on the error path; `default` only affects the schema example.
    success = serializers.BooleanField(default=False)
    # Human-readable error message (stringified exception).
    error = serializers.CharField()


class CrawlNewsView(APIView):
    """Trigger the GXBH news crawler and report how many articles were saved.

    GET /...?page=<int>&category=<str>&limit=<int>

    Responses:
        200 -- {"success": true, "count": <int>} on a successful crawl.
        400 -- {"success": false, "error": <str>} when a query parameter
               cannot be parsed (e.g. non-numeric ``page``/``limit``).
        500 -- {"success": false, "error": <str>} when the crawler itself fails.
    """

    @extend_schema(
        parameters=[
            OpenApiParameter(
                name="page",
                description="Page number",
                required=False,
                type=OpenApiTypes.INT,
                default=1,
            ),
            OpenApiParameter(
                name="category",
                description="News category",
                required=False,
                type=OpenApiTypes.STR,
                default="要闻",
            ),
            OpenApiParameter(
                name="limit",
                description="Number of news to crawl",
                required=False,
                type=OpenApiTypes.INT,
                default=10,
            ),
        ],
        responses={
            200: CrawlNewsSuccessResponseSerializer,
            400: CrawlNewsErrorResponseSerializer,
            500: CrawlNewsErrorResponseSerializer,
        },
        summary="Crawl news from the source",
        description="Triggers the news crawler to fetch and save news articles.",
    )
    def get(self, request):
        # Parse query parameters first, in their own try block: malformed
        # client input (non-numeric page/limit) must surface as 400, not be
        # swallowed by the broad crawler handler and returned as a 500.
        try:
            page = int(request.GET.get("page", 1))
            limit = int(request.GET.get("limit", 10))
        except ValueError as e:
            return Response(
                {"success": False, "error": f"无效的参数: {e}"},
                status=status.HTTP_400_BAD_REQUEST,
            )
        category = request.GET.get("category", "要闻")

        logger.info(f"开始爬取新闻，分类: {category or '全部'}，数量限制: {limit}")

        try:
            crawler = GXBHCrawler()
            count = crawler.crawl_and_save(page=page, category=category, limit=limit)
        except Exception as e:
            # Broad catch is acceptable at this API boundary, but use
            # logger.exception so the traceback is preserved in the logs
            # (logger.error with only str(e) loses it).
            logger.exception(f"爬取新闻时出错: {str(e)}")
            return Response(
                {"success": False, "error": str(e)},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
            )

        return Response(
            {"success": True, "count": count}, status=status.HTTP_200_OK
        )
