import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Scrape policy documents from the Fushun Human Resources and Social
    Security Bureau (抚顺市人力资源和社会保障局).

    The site exposes a JSON list endpoint that is paged via POST requests;
    each list entry links to an HTML detail page, which is combined with
    the list metadata into one ``Item``.
    """

    name = "fushun_fssrlzyhshbzj"

    province: str = "辽宁省"  # from tracking sheet
    city: str = "抚顺市"  # from tracking sheet
    county: str = ""  # from tracking sheet
    park: str = "None"  # from tracking sheet
    source: str = "抚顺市人力资源和社会保障局"  # from tracking sheet; same source is merged downstream
    url: str = "https://fsrs.fushun.gov.cn/dynamic/zwgk/jd_govInfoPub.html?categorynum=003013"  # entry URL, kept for later debugging
    # NOTE(review): the entry URL above uses categorynum=003013 while the API
    # payload below uses 003001003 — confirm which category is intended.

    # Constants shared by the first list request and the pagination requests.
    LIST_API = "https://fushun.gov.cn/EWB-FRONT/rest/lightfrontaction/getgovinfolist"
    PAGE_SIZE = 20
    CATEGORY_NUM = "003001003"
    SITE_GUID = "7eb5f7f1-9041-43ad-8e13-8fcb82ea831a"

    def _list_request(self, page_index: int, is_next=None) -> scrapy.Request:
        """Build one POST request against the JSON list endpoint.

        ``page_index`` is zero-based.  Pass ``is_next=False`` for follow-up
        page requests so ``detail_requests`` does not schedule pagination
        again from those responses.
        """
        payload = {
            "deptcode": "",
            "categorynum": self.CATEGORY_NUM,
            "pageIndex": page_index,
            "pageSize": self.PAGE_SIZE,
            "siteGuid": self.SITE_GUID,
        }
        # A raw JSON body belongs on a plain Request; FormRequest adds
        # nothing here since no form data is being encoded.
        return scrapy.Request(
            url=self.LIST_API,
            method='POST',
            body=json.dumps(payload),
            headers={'Content-Type': 'application/json'},
            callback=self.detail_requests,
            meta={} if is_next is None else {'is_next': is_next},
        )

    def start_requests(self):
        """Kick off the crawl with the first (zero-based) list page."""
        yield self._list_request(0)

    def detail_requests(self, response, **kwargs):
        """Handle one JSON list page: schedule every detail page, and —
        only for the first page — schedule all remaining list pages."""
        # Parse the response once instead of once per field.
        custom = json.loads(response.text)['custom']
        for entry in custom['data']:
            # example infourl: /govxxgk/fushun/2024-02-29/<uuid>.html
            yield scrapy.Request(
                url="https://fushun.gov.cn" + entry['infourl'],
                callback=self.parse_detail,
                meta={'pub_date': entry['infodate'], 'tit': entry['title']},
            )

        # Pagination: only the first response fans out; follow-up requests
        # carry is_next=False and therefore skip this branch.
        if response.meta.get("is_next") is not False:
            pages = math.ceil(custom['total'] / self.PAGE_SIZE)
            if pages > 1:
                self.logger.info("total pages: %s", pages)
                for page in range(2, pages + 1):
                    self.logger.debug("scheduling page: %s", page)
                    yield self._list_request(page - 1, is_next=False)

    def parse_list(self, response, **kwargs):
        # Unused: list responses are handled by detail_requests instead.
        pass

    def parse_detail(self, response, **kwargs):
        """Build an Item from a detail page plus the metadata forwarded
        from the list response via ``response.meta``."""
        item = Item()
        item['title'] = response.meta.get('tit')
        item['source_url'] = response.url
        item['publish_date'] = response.meta.get('pub_date')
        # xpath(".") selects the whole document, so this stores the entire
        # page markup.  NOTE(review): looks like a placeholder selector —
        # confirm whether a content-specific XPath was intended.
        item['content'] = response.xpath(""".""").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Allow running this spider directly for local debugging.
    cmdline.execute(["scrapy", "crawl", "fushun_fssrlzyhshbzj"])
