import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for policy documents published by the Dalian Zhongshan
    District People's Government (www.dlzs.gov.cn).

    The site exposes a JSON POST API: one endpoint returns a paginated
    list of document summaries, a second returns the full body of a
    single document by its id.
    """

    name = "dalian_dlszsqrmzf"

    province: str = "辽宁省"  # taken from the spider registry table
    city: str = "大连市"  # taken from the spider registry table
    county: str = "中山区"  # taken from the spider registry table
    park: str = "None"  # taken from the spider registry table
    source: str = "大连市中山区人民政府"  # from table; identical sources are merged downstream
    # Entry URL recorded so the crawl can be reproduced/debugged by hand.
    url: str = "https://www.dlzs.gov.cn/ztlm/#/government-info-open?index=3"

    # List endpoint and the headers both JSON POST calls share.
    _list_api = "https://www.dlzs.gov.cn/external/affairs/getPublicDocumentList"
    _json_headers = {'Content-Type': 'application/json'}

    def _list_request(self, page, *, is_first=False):
        """Build the POST request for one page of the document list.

        The first-page request carries no ``is_next`` meta flag so its
        callback knows it may fan out to the remaining pages; follow-up
        pages set ``is_next: False`` to prevent re-paginating forever.

        :param page: 1-based page number for the list API.
        :param is_first: True only for the initial request.
        :return: a ``scrapy.Request`` whose callback is ``detail_requests``.
        """
        payload = {
            "pagination": {
                "current": page,
                "pagesize": 20,
                "total": 0
            },
            "query": {
                "label": "",
                "state": "1",
                "redTop": "3"
            }
        }
        return scrapy.Request(
            url=self._list_api,
            method='POST',
            body=json.dumps(payload),  # API expects a JSON request body
            headers=self._json_headers,
            callback=self.detail_requests,
            meta={} if is_first else {'is_next': False}
        )

    def start_requests(self):
        """Kick off the crawl with page 1 of the document list."""
        yield self._list_request(1, is_first=True)

    def detail_requests(self, response, **kwargs):
        """Handle one list page: request every document's detail and,
        for the first page only, schedule the remaining list pages.
        """
        listing = json.loads(response.text)
        pagination = listing['pagination']
        pages = math.ceil(pagination['total'] / pagination['pagesize'])

        for notice in listing['datas']:
            notice_id = notice['id']
            link = f'https://www.dlzs.gov.cn/external/affairs/getPublicDocumentInfo/{notice_id}'
            self.logger.debug("detail url: %s", link)
            yield scrapy.Request(
                url=link,
                method='POST',
                body=json.dumps({}),  # detail API still requires an (empty) JSON body
                headers=self._json_headers,
                callback=self.parse_detail,
                meta={'notice_id': notice_id}
            )

        self.logger.info("total pages: %s", pages)
        # Only the first-page response (no is_next flag in meta) fans out,
        # so each remaining page is requested exactly once.
        if pages > 1 and response.meta.get("is_next") is not False:
            for page in range(2, pages + 1):
                yield self._list_request(page)

    def parse_list(self, response, **kwargs):
        # Required by the base spider's interface; listing is handled by
        # detail_requests for this site.
        pass

    def parse_detail(self, response, **kwargs):
        """Build a NetTaxPolicyItem from one document-detail JSON response."""
        notice_detail = json.loads(response.text)
        notice_id = response.meta.get('notice_id')
        # Reuse an item passed through meta when present — presumably a
        # pipeline convention of the base spider; TODO confirm.
        item = response.meta.get('item')
        if item is None:
            item = Item()
        data = notice_detail['data']
        item['title'] = data['label']
        item['source_url'] = f"https://www.dlzs.gov.cn/#/gov-file?id={notice_id}&currentLocation=&level1=%E6%94%BF%E5%BA%9C%E4%BF%A1%E6%81%AF%E5%85%AC%E5%BC%80"
        item['publish_date'] = data['releaseDate']
        item['content'] = data['body']
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly with the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "dalian_dlszsqrmzf"])
