import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for tax-policy notices published by the Dalian Jinpu New Area
    (Jinzhou District) government site (www.dljp.gov.cn).

    The site exposes its document list through a POST endpoint returning
    JSON; each list entry links to a detail page that is parsed into a
    NetTaxPolicyItem.
    """

    name = "dalian_dlsjzqjpxqrmzf"

    # Fixed location / source metadata copied onto every yielded item.
    province: str = "辽宁省"
    city: str = "大连市"
    county: str = "金州区(金普新区)"
    park: str = "None"
    source: str = "大连市金州区(金普新区)人民政府"  # items with the same source are merged downstream
    url: str = "https://www.dljp.gov.cn/govxxgk/xxgk.html"  # entry URL, kept for debugging

    # JSON list endpoint and its fixed page size (shared by first request
    # and pagination, which previously duplicated this configuration).
    LIST_API = "https://www.dljp.gov.cn/EpointWebBuilder_Dlpx/zNJSAction.action?cmd=getGovInfoListByCategoryNum"
    PAGE_SIZE = 15

    def _list_request(self, page_index, is_next=None):
        """Build the POST request for one page of the notice list.

        :param page_index: zero-based page number sent to the endpoint.
        :param is_next: forwarded in ``meta``; ``False`` marks requests
            spawned by pagination so they do not paginate again.
        :return: a ``scrapy.Request`` whose callback is ``detail_requests``.
        """
        payload = {
            "deptcode": "001",
            "categoryNum": "001001001",
            "pageIndex": str(page_index),
            "pageSize": str(self.PAGE_SIZE),
            "siteGuid": "7eb5f7f1-9041-43ad-8e13-8fcb82ea831a",
        }
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        }
        meta = {} if is_next is None else {'is_next': is_next}
        return scrapy.Request(
            url=self.LIST_API,
            method='POST',
            body=urlencode(payload),  # form-encode the dict for the POST body
            headers=headers,
            callback=self.detail_requests,
            meta=meta,
        )

    def start_requests(self):
        # Fetch the first page; detail_requests fans out the rest.
        yield self._list_request(0)

    def detail_requests(self, response, **kwargs):
        """Handle one JSON list page: yield detail-page requests and, on
        the first page only, schedule every remaining page.
        """
        notice_info = json.loads(response.text)
        # 'custom' is itself a JSON string; parse it once instead of twice.
        custom = json.loads(notice_info['custom'])['data'][0]
        total_num = int(custom['total'])

        for info in custom['infolist']:
            real_url = f"https://www.dljp.gov.cn/govxxgk{info['infourl']}"
            self.logger.debug("real_url: %s", real_url)
            item = Item()
            item['title'] = info['title']
            item['publish_date'] = info['infodate']
            yield response.follow(real_url, callback=self.parse_detail, meta={'item': item})

        # Pagination: only the first-page response (is_next unset) fans out
        # to the remaining pages, so each page is requested exactly once.
        if total_num > self.PAGE_SIZE and response.meta.get("is_next") is not False:
            pages = math.ceil(total_num / self.PAGE_SIZE)
            self.logger.debug("total pages: %s", pages)
            for page in range(2, pages + 1):
                # The endpoint's pageIndex is zero-based, hence page - 1.
                yield self._list_request(page - 1, is_next=False)

    def parse_list(self, response, **kwargs):
        # Required by the base-class interface; list handling happens in
        # detail_requests for this site.
        pass

    def parse_detail(self, response, **kwargs):
        """Complete the item started in detail_requests with detail-page data."""
        item = response.meta.get('item')
        item['source_url'] = response.url
        # NOTE(review): xpath "." selects the whole document node — this
        # looks like a placeholder; confirm the real content selector.
        item['content'] = response.xpath(""".""").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Run this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "dalian_dlsjzqjpxqrmzf"])
