#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/3/15 16:49
# @Author  : 王凯
# @File    : henan_hnsrmzf_fbce_policy.py
# @Project : scrapy_spider

import json

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item, urlencode


class Henan_Hbszfhcxjsj_16B2_Policy(BaseTaxPolicySpider):
    """Spider for tax-policy documents published by the Henan Provincial
    People's Government, crawled through the site-wide search API across
    two document channels (identified by ``channelMarkId``)."""

    name = "henan_hnsrmzf_fbce_policy"
    province: str = "河南省"  # from registry table
    city: str = ""  # from registry table
    county: str = ""  # from registry table
    park: str = ""  # from registry table
    source: str = "河南省人民政府"  # from registry table; identical sources are merged downstream
    url: str = "https://www.henan.gov.cn/zwgk/fgwj/yz/index.html"  # entry URL, kept for later troubleshooting
    auto_next: bool = True

    # Base endpoint of the search API that backs both channels.
    SEARCH_API: str = "https://searchapi.henan.gov.cn/open/api/external"
    # channelMarkId values of the two document channels being crawled.
    CHANNEL_MARK_IDS: tuple = (
        "45000000010115416542079063",
        "45000000010115416542055799",
    )

    @staticmethod
    def _build_params(channel_mark_id: str, page: int = 1) -> dict:
        """Return the search-API query parameters for one channel/page.

        Centralizes the previously duplicated 15-key parameter dict so the
        two channels differ only in ``channel_mark_id``.
        """
        return {
            "keywords": "",
            "siteId": "4500000001",
            "allKeyword": "",
            "anyKeyword": "",
            "noKeyword": "",
            "searchRange": "-1000",
            "sortType": "200",
            "beginTime": "",
            "endTime": "",
            "pageNumber": str(page),
            "pageSize": "15",
            "fileType": "3",
            "year": "",
            "channelMarkId": channel_mark_id,
        }

    def start_requests(self):
        """Issue the first-page request for each document channel.

        The params dict is also passed via ``cb_kwargs`` so ``parse`` can
        reuse it to build the pagination requests.
        """
        for channel_mark_id in self.CHANNEL_MARK_IDS:
            params = self._build_params(channel_mark_id)
            yield self.Request(
                url=self.SEARCH_API + "?" + urlencode(params),
                callback=self.parse,
                cb_kwargs={"params": params},
            )

    def parse(self, response, **kwargs):
        """Handle the first results page: emit its items, then schedule
        requests for every remaining page reported by ``totalPage``."""
        data = json.loads(response.text)["data"]
        yield from self.parse_list(response, **kwargs)
        params = kwargs.get("params")
        for page in range(2, data["totalPage"] + 1):
            # Build a fresh dict per page instead of mutating the shared
            # params object (it is also referenced by the original request's
            # cb_kwargs, so in-place mutation is fragile on retries).
            page_params = dict(params, pageNumber=str(page))
            yield self.Request(
                url=self.SEARCH_API + "?" + urlencode(page_params),
                callback=self.parse_list,
            )

    def parse_list(self, response, **kwargs):
        """Parse one results page and yield an item per policy document.

        Entries without a publish date are skipped.
        """
        datas = json.loads(response.text)["data"]["datas"]
        for data in datas:
            item = dict(
                title=data["title"],
                publish_date=data["pubDate"],
                content=data["content"],
                source_url=data["selfUrl"],
                source=self.source,
                province=self.province,
                city=self.city,
                county=self.county,
                park=self.park,
            )
            if item["publish_date"]:
                yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    # Run this spider directly for local debugging via the scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "henan_hnsrmzf_fbce_policy"])
