import time
import re
import json
import requests
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Crawl policy documents from the Meishan Municipal People's Government site.

    For each document-number prefix in ``typeRecord_list``, probe the site's
    JSON search endpoint once (blocking) to learn the total page count, then
    schedule one Scrapy POST request per result page; each result record's
    detail page is fetched and emitted as a ``NetTaxPolicyItem``.
    """

    name = "sichuan_mssrmzf"

    # Location/source metadata copied onto every scraped item.
    province: str = "四川省"  # from the reference table
    city: str = "眉山市"  # from the reference table
    county: str = ""  # from the reference table
    park: str = "None"  # from the reference table
    source: str = "眉山市人民政府"  # from the reference table; identical sources are merged
    url: str = "https://www.ms.gov.cn/zwgk/zfwj.htm"  # entry URL, kept for troubleshooting

    # JSON search API endpoint, shared by the probe request and the paged requests.
    search_url = "https://www.ms.gov.cn/openlab_web/webber/search/search/search/queryPage"

    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Authorization": "preview",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/json;charset=UTF-8",
        "Origin": "https://www.ms.gov.cn",
        "Pragma": "no-cache",
        "Referer": "https://www.ms.gov.cn/zwgk/zfwj.htm",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": "\"Not A(Brand\";v=\"8\", \"Chromium\";v=\"132\", \"Google Chrome\";v=\"132\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }

    # Document-number prefixes used as the API's ``typeRecord`` filter.
    typeRecord_list = [
        "市政府令", "眉府发", "眉府规", "眉府函", "眉府通", "眉府办发", "眉府办规", "眉府办函"
    ]

    def _build_payload(self, type_record, page):
        """Return the JSON request body for one search query.

        :param type_record: document-number prefix for the ``typeRecord`` filter
        :param page: page index to request (the API's indexing base is assumed
            zero-based from the original probe request — TODO confirm)
        """
        return {
            "aliasName": "open_data",
            "searchType": "string",
            "orderType": "score",
            "searchDateType": "custom",
            "beginDate": "",
            "endDate": "",
            "auditing": [
                "1"
            ],
            "owner": "971045726",
            "columnId": "6881,6849,6850,5781,5659,5447,5929,5601,5798,7182,7299,7309,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386",
            "keyWord": "",
            "page": {
                "current": page,
                "size": 10,
                "total": 0,
                "totalPage": 0,
                "indexs": []
            },
            "field": [],
            "filter": {
                "typeRecord": type_record
            }
        }

    def start_requests(self):
        """Probe each document type for its total count, then yield one POST per page."""
        for type_record in self.typeRecord_list:
            # Blocking probe: Scrapy's downloader is not usable before the first
            # yielded request, so a plain requests.post discovers the pagination.
            probe = requests.post(
                self.search_url,
                headers=self.headers,
                data=json.dumps(self._build_payload(type_record, 0), separators=(',', ':')),
            )
            page_info = json.loads(probe.text)['data']['page']
            total_num = page_info['total']
            size = page_info['size']
            pages = int(math.ceil(int(total_num) / int(size)))
            self.logger.info("总页数：%s 总条数：%s", pages, total_num)
            # NOTE(review): range(pages + 1) covers pages 0..pages inclusive —
            # one page more than ceil(total/size). Kept as in the original
            # because the API's page-index base is unconfirmed; an out-of-range
            # page simply returns an empty record list.
            for page in range(0, pages + 1):
                yield scrapy.Request(
                    url=self.search_url,
                    method='POST',
                    body=json.dumps(self._build_payload(type_record, page), separators=(',', ':')),
                    headers=self.headers,
                    callback=self.detail_requests,
                )

    def detail_requests(self, response, **kwargs):
        """Parse one search-result page and request every listed document's detail page."""
        record_list = json.loads(response.text)['data']['page']['records']

        for record in record_list:
            # Carry the listing metadata to parse_detail via request meta.
            meta = {
                'url': record['url'],
                'title': record['title'],
                'publish_date': record['createDate'],
            }
            yield scrapy.Request(url=record['url'], method='GET', headers=self.headers,
                                 callback=self.parse_detail, meta=meta)

    def parse_list(self, response, **kwargs):
        # Listing pages are handled by detail_requests; nothing to do here.
        pass

    def parse_detail(self, response=None, **kwargs):
        """Assemble a NetTaxPolicyItem from a document detail page plus listing meta."""
        item = Item()
        item['title'] = response.meta.get('title')
        item['source_url'] = response.url
        item['publish_date'] = response.meta.get('publish_date')
        # Serialize the whole document as content; downstream pipelines clean it up.
        item['content'] = response.xpath(".").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        # Mark documents whose title flags them as expired ('已失效') or
        # repealed ('已废止'); guard against a missing title in meta.
        title_text = item['title'] or ''
        if '已失效' in title_text or '已废止' in title_text:
            item['state'] = '0'
        else:
            item['state'] = None
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Launch this spider directly for local debugging.
    cmdline.execute(["scrapy", "crawl", "sichuan_mssrmzf"])
