# -*- coding: utf-8 -*-

import time
from datetime import datetime

import feapder
from feapder.utils.tools import jsonp2json
from loguru import logger


class GdSpider(feapder.AirSpider):
    """Crawl Guangdong provincial government policy documents from gd.gov.cn.

    The user supplies a YYYYMMDD date range on stdin; the spider pages through
    the site's JSONP search API, skips items newer than the range end, stops
    entirely once items older than the range start appear, and logs one result
    dict per detail page.
    """

    __custom_setting__ = dict(
        LOG_LEVEL='WARNING',
        SPIDER_MAX_RETRY_TIMES=2,
        TASK_MAX_CACHED_SIZE=20
    )

    @staticmethod
    def get_input():
        """Prompt for and validate a date range.

        Returns:
            tuple[int, int]: (start, end) as YYYYMMDD integers on success.
            bool: ``False`` when the input cannot be parsed as two valid dates.
        """
        print('请输入日期范围，比如 ==> 20240501-20240505')
        try:
            start, end = input('你的输入：').split('-')
            # strptime raises ValueError for anything that is not a real
            # calendar date in YYYYMMDD form.
            datetime.strptime(start, '%Y%m%d')
            datetime.strptime(end, '%Y%m%d')
        except Exception as e:
            logger.error(e)
            return False
        else:
            return int(start), int(end)

    def need_stop(self, publish_date: str) -> bool:
        """Return True when the whole crawl should stop.

        An item dated before ``self.time1`` (range start) means everything
        still to come is out of range — presumably the API returns results in
        descending date order, which the caller's ``break`` relies on (TODO:
        confirm against the ``order`` query parameter).

        Args:
            publish_date: Date string like ``2024-05-01``.
        """
        return int(publish_date.replace('-', '')) < self.time1

    def need_exclude(self, publish_date: str) -> bool:
        """Return True when an item is newer than the range end (``self.time2``)
        and its detail page should be skipped.

        Args:
            publish_date: Date string like ``2024-05-01``.
        """
        return int(publish_date.replace('-', '')) > self.time2

    @staticmethod
    def get_params(page: int) -> dict:
        """Build the query parameters for one page of the JSONP search API.

        Args:
            page: 1-based page number.
        """
        params = {
            "callback": "jQuery06943350097319478_1715485411347",
            "page": page,
            "pagesize": "20",
            "isgkml": "1",
            "text": "",
            "order": "1",
            "including_url_doc": "1",
            "including_attach_doc": "1",
            "classify_main_name": "",
            "classify_mains": "",
            "classify_mains_excluded": "3491,3492,3493,4074,4168,4169",
            "position": "title",
            # Cache-busting timestamp in milliseconds.
            "_": int(time.time() * 1000)
        }
        return params

    def start_requests(self):
        """Read the date range from stdin, then seed page 1 of the listing."""
        result = self.get_input()
        if not result:
            logger.warning('输入有误，请重新输入')
            return
        # time1/time2 are the inclusive YYYYMMDD bounds used by
        # need_stop / need_exclude.
        self.time1, self.time2 = result

        list_url = "https://search.gd.gov.cn/jsonp/site/2"
        yield feapder.Request(
            url=list_url,
            callback=self.parse_list,
            params=self.get_params(1),
            page=1
        )

    def parse_list(self, request, response):
        """Parse one listing page; yield detail requests and the next page.

        Paging uses the for/else idiom: the next page is requested only when
        the loop was NOT stopped by ``need_stop``.
        """
        logger.info(f'正在爬取第{request.page}页...')

        json_data = jsonp2json(response.text)
        items = json_data['results']
        if not items:
            # End of data. Without this guard the for/else below would fall
            # through to the else branch on an empty page and keep requesting
            # further (empty) pages forever.
            logger.info('爬虫将停止')
            return

        for item in items:
            index_id = item['identifier']
            publish_org = item['publisher_src']
            publish_date = item['pub_time']

            if self.need_stop(publish_date):
                logger.info('爬虫将停止')
                break

            if self.need_exclude(publish_date):
                continue

            title = item['title']
            url = item['url']

            yield feapder.Request(
                url=url,
                callback=self.parse_detail,
                index_id=index_id,
                publish_org=publish_org,
                publish_date=publish_date,
                title=title,
            )

        else:
            next_page = request.page + 1
            yield feapder.Request(
                url=request.url,
                callback=self.parse_list,
                params=self.get_params(next_page),
                page=next_page
            )

    def parse_detail(self, request, response):
        """Extract the policy text and attachment info, then log the record."""
        content = response.xpath('//div[@class="article-content"]//text()').getall()
        content = ''.join(content)

        # First link whose text contains "附件" (attachment); empty string
        # defaults when absent.
        attachment_link = response.xpath('//a[contains(text(), "附件")]/@href').get('')
        attachment_name = response.xpath('//a[contains(text(), "附件")]/@alt').get('')

        result = {
            '发布日期': request.publish_date,
            '政策标题': request.title,
            '发布机构': request.publish_org,
            '索引号': request.index_id,
            '政策正文文本': content,
            '地址': request.url,
            '附件名称': attachment_name,
            '附件链接': attachment_link
        }
        logger.success(result)

if __name__ == "__main__":
    # Run the spider with 5 concurrent worker threads.
    GdSpider(thread_count=5).start()
