import time
import re
import json
import requests
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for policy documents published by the Dazhou municipal
    government (达州市人民政府).

    For each category URL in ``doc_list`` the total record count is probed
    once, then every listing page of the JSON search API is scheduled and
    each article link found is followed to :meth:`parse_detail`.
    """

    name = "sichuan_dzsrmzf"

    # Region fields copied verbatim into every yielded item.
    province: str = "四川省"
    city: str = "达州市"
    county: str = ""
    park: str = "None"
    # Items sharing the same source are merged downstream.
    source: str = "达州市人民政府"
    # Entry URL of the public listing page, kept for traceability/debugging.
    url: str = "https://www.dazhou.gov.cn/xxgk-list-zhengce.html"
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'priority': 'u=0, i',
        'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'none',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
    }
    # One JSON search endpoint per document category (catid); all start at page=1.
    doc_list = [
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=62&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=61&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=60&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=594&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=593&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=592&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=591&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=59&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=58&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=523&&keyword=&&page=1&&pagesize=10",
        "https://www.dazhou.gov.cn/dzserachs.html?module=xxgk&&title=&&wenhao=&&catid=487&&keyword=&&page=1&&pagesize=10",
    ]

    def start_requests(self):
        """Probe each category for its total record count, then yield one
        Scrapy request per listing page.

        NOTE(review): the page-count probe is a *blocking* ``requests`` call
        inside Scrapy's reactor — tolerable for ~10 categories, but it should
        move into a callback chain if ``doc_list`` grows.
        """
        for list_url in self.doc_list:
            # Probe the first page only to read 'total'; fail loudly on
            # HTTP errors instead of crashing later on malformed JSON.
            probe = requests.get(list_url, headers=self.headers, timeout=30)
            probe.raise_for_status()
            total_num = int(json.loads(probe.text)['total'])
            rows = 10  # must match the 'pagesize' query parameter above
            pages = math.ceil(total_num / rows)
            print("总页数：", pages, "总条数：", total_num)
            # Single pass instead of two split() calls; 'page=1' occurs
            # exactly once in each doc_list URL.
            url_left, _, url_right = list_url.partition('page=1')
            for page in range(1, pages + 1):
                page_url = f"{url_left}page={page}{url_right}"
                yield scrapy.Request(url=page_url, method='GET',
                                     headers=self.headers,
                                     callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse one JSON listing page and follow every article URL.

        The listing metadata travels in ``meta`` so downstream callbacks
        could fall back on it; ``parse_detail`` currently re-extracts the
        fields from the article page itself.
        """
        for entry in json.loads(response.text)['data']:
            meta = {
                'url': entry['url'],
                'title': entry['title'],
                'publish_date': entry['inputtime'],
            }
            yield scrapy.Request(url=entry['url'], method='GET',
                                 headers=self.headers,
                                 callback=self.parse_detail, meta=meta)

    def parse_list(self, response, **kwargs):
        # Required by the base-spider interface; listing is handled by
        # detail_requests instead.
        pass

    def parse_detail(self, response=None, **kwargs):
        """Build one item from an article detail page.

        State is '0' (repealed/expired) when the title carries the usual
        markers, otherwise None (unknown/active).
        """
        item = Item()
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
        # Whole document HTML; presumably cleaned by a pipeline — TODO confirm.
        item['content'] = response.xpath(".").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        # The meta tag may be absent, making title None — guard before 'in'.
        title = item['title'] or ''
        if '已失效' in title or '已废止' in title:
            item['state'] = '0'
        else:
            item['state'] = None
        yield item


if __name__ == "__main__":
    # Convenience launcher: run this spider directly without the scrapy CLI.
    from scrapy import cmdline

    crawl_command = "scrapy crawl sichuan_dzsrmzf"
    cmdline.execute(crawl_command.split())
