import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Tax-policy spider for the Dalian Development and Reform Commission
    (大连市发展和改革委员会).

    Flow:
      1. ``start_requests`` POSTs the xxgk search endpoint once -> result page 1.
      2. ``detail_requests`` follows every detail link on the page and, on the
         first page only, schedules one POST per remaining result page
         (those carry ``meta['is_next']=False`` so they do not paginate again).
      3. ``parse_detail`` yields one item per document page.
    """

    name = "dalian_dlsfzhggwyh"

    province: str = "辽宁省"  # from the assignment sheet
    city: str = "大连市"  # from the assignment sheet
    county: str = ""  # from the assignment sheet
    park: str = "None"  # from the assignment sheet
    source: str = "大连市发展和改革委员会"  # same source values are merged downstream
    # Entry URL, kept so the listing can be found again when debugging.
    url: str = "https://pc.dl.gov.cn/col/col1683/index.html?vc_xxgkarea=2102000056&jh=261"

    # Search endpoint shared by the initial request and pagination.
    SEARCH_URL: str = "https://pc.dl.gov.cn/module/xxgk/search.jsp"
    # Form constants common to every search POST.
    INFOTYPE_ID: str = "DL00304"
    AREA: str = "2102000056"

    def start_requests(self):
        """Issue the initial form POST that returns result page 1."""
        params = {
            "divid": "div4",
            "jdid": "2",
            "area": self.AREA,
            # NOTE(review): this field name looks like a mangled
            # "sortfield": "compaltedate:0,orderid:1" pair (cf. the
            # pagination payload below), but the site accepts it as-is —
            # confirm against the live form before changing.
            "compaltedate:0,orderid:1": "",
            "infotypeId": self.INFOTYPE_ID,
            "isAllList": "1",
            "standardXxgk": "1",
        }
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
        }
        yield scrapy.Request(
            url=self.SEARCH_URL,
            method="POST",
            body=urlencode(params),
            headers=headers,
            callback=self.detail_requests,
        )

    def detail_requests(self, response, **kwargs):
        """Follow each detail link on a result page.

        On the first page (no ``is_next`` flag in meta) also fan out one
        POST per remaining result page.
        """
        # Total page count is embedded in the pager text, e.g. "共 12 页".
        # Default to a single page when the pager is missing instead of
        # crashing on an empty findall() result.
        match = re.search(r"共&nbsp;\s*(\d+)\s*&nbsp;页", response.text)
        pages = int(match.group(1)) if match else 1

        links = response.xpath('//div[@class="zfxxgk_item"]//ul/li/a/@href').getall()
        self.logger.info(
            "result page: %d unique links, %d total pages", len(set(links)), pages
        )
        for link in links:
            yield response.follow(link, callback=self.parse_detail)

        # Paginated responses arrive with is_next=False and must not fan
        # out again; only the first response schedules pages 2..N.
        if response.meta.get("is_next") is False:
            return

        for page in range(2, pages + 1):
            payload = {
                "infotypeId": self.INFOTYPE_ID,
                "jdid": "2",
                "area": self.AREA,
                "divid": "div4",
                "vc_title": "",
                "vc_number": "",
                "sortfield": "compaltedate:0,orderid:1",
                # BUG FIX: this was hard-coded to "2", so every pagination
                # request re-fetched page 2 and the dupefilter dropped the
                # identical duplicates — pages 3..N were never crawled.
                "currpage": str(page),
                "vc_filenumber": "",
                "vc_all": "",
                "texttype": "",
                "fbtime": "",
                "standardXxgk": "1",
                "isAllList": "1",
                "fields": "",
                "fieldConfigId": "",
                "hasNoPages": "",
                "infoCount": "",
            }
            headers = {
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
            }
            yield response.follow(
                url=self.SEARCH_URL,
                method="POST",
                body=urlencode(payload),
                headers=headers,
                callback=self.detail_requests,
                meta={"is_next": False},
            )

    def parse_list(self, response, **kwargs):
        # Listing is handled entirely by detail_requests; nothing to do here.
        pass

    def parse_detail(self, response, **kwargs):
        """Extract one item from a document detail page and yield it."""
        item = response.meta.get("item")
        if item is None:
            item = Item()
        item["title"] = response.xpath(
            "string(//meta[@name='ArticleTitle']/@content)"
        ).get()
        item["source_url"] = response.url
        item["publish_date"] = response.xpath(
            "string(//div[contains(@class, 'gk-bq')]/ul[3]/li[2])"
        ).get()
        # XPath "." serializes the root node, i.e. the full page HTML.
        item["content"] = response.xpath(".").get()
        item["source"] = self.source
        item["province"] = self.province
        item["city"] = self.city
        item["county"] = self.county
        item["park"] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Convenience entry point: launch this spider directly from the file.
    cmdline.execute(["scrapy", "crawl", "dalian_dlsfzhggwyh"])
