import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import requests
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for regulatory documents of the Xingwen County (Yibin,
    Sichuan) government document library.

    Flow: for each AND-filter set in ``value_list`` a blocking probe POST
    discovers the page count, then every result page is fetched through
    Scrapy and each listed document's detail page is scraped into an Item.
    """

    name = "sichuan_ybsxwxrmzfgfxwj"

    province: str = "四川省"  # taken from the source table
    city: str = "宜宾市"  # taken from the source table
    county: str = "兴文县"  # taken from the source table
    park: str = "None"  # taken from the source table
    source: str = "宜宾市兴文县人民政府"  # same-source records are merged downstream
    url: str = "https://www.scxw.gov.cn/zwgk/zfxxgk/zcwjk1/index.html"  # entry URL, kept for troubleshooting
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/json",
        "Origin": "https://www.scxw.gov.cn",
        "Pragma": "no-cache",
        "Referer": "https://www.scxw.gov.cn/zwgk/zfxxgk/zcwjk1/index.html",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": "\"Not(A:Brand\";v=\"99\", \"Google Chrome\";v=\"133\", \"Chromium\";v=\"133\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }
    base_url = "https://www.scxw.gov.cn/irs/front/list"

    # Channel ids combined with OR in every search request.
    CHANNEL_IDS = (15768, 15769, 15770, 15771, 15773, 15775, 15786)

    # AND-combined property filters; one paginated search is run per entry.
    value_list = [
        [
            {
                "property": "f_2024826520105",
                "operator": "eq",
                "value": "行政规范性文件"
            },
            {
                "property": "f_2024108451914",
                "operator": "eq",
                "value": "县（市、区）政府及办公室"
            }
        ],
        [
            {
                "property": "f_2024826520105",
                "operator": "eq",
                "value": "行政规范性文件"
            },
            {
                "property": "f_2024108451914",
                "operator": "eq",
                "value": "县级部门"
            }
        ],
    ]

    def _build_payload(self, properties, page_no=1):
        """Return the JSON search payload for one filter set and page number."""
        return {
            "code": "18808a48fad",
            "sorts": [
                {
                    "sortField": "save_time",
                    "sortOrder": "DESC"
                }
            ],
            "granularity": "ALL",
            "pageSize": 10,
            "pageNo": page_no,
            "tableName": "t_18798d1afb0",
            "beginDateTime": "",
            "endDateTime": "",
            "customFilter": {
                "operator": "and",
                "properties": properties,
                "filters": [
                    {
                        "operator": "or",
                        "properties": [
                            {"property": "channel_id", "operator": "eq", "value": cid}
                            for cid in self.CHANNEL_IDS
                        ]
                    }
                ]
            }
        }

    def start_requests(self):
        """Probe each filter set for its page count, then yield one POST per page."""
        for properties in self.value_list:
            payload = self._build_payload(properties)
            # NOTE: blocking call outside the Scrapy reactor — used once per
            # filter set only, to discover pagination before scheduling requests.
            probe = requests.post(self.base_url, headers=self.headers,
                                  data=json.dumps(payload))
            pager = probe.json()['data']['pager']  # parse the response once
            pages = pager['pageCount']
            print("总页数：", pages, "总条数：", pager['total'])
            for page in range(1, pages + 1):
                # pageNo sent as int, matching the (accepted) probe request.
                payload['pageNo'] = page
                yield scrapy.Request(self.base_url, method='POST',
                                     headers=self.headers,
                                     body=json.dumps(payload),
                                     callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse one search-result page and request every document detail page."""
        for row in json.loads(response.text)['data']['list']:
            d = {
                'url': row['doc_pub_url'],
                'title': row['f_202356571807_ext'],
                'publish_date': row['f_2023710729988'],
            }
            print("ddddd:", d)
            yield scrapy.Request(url=d['url'], method='GET', headers=self.headers,
                                 callback=self.parse_detail, meta=d)

    def parse_list(self, response, **kwargs):
        """Unused; listing is handled by detail_requests."""
        pass

    def parse_detail(self, response=None, **kwargs):
        """Build an Item from a document detail page and the request meta."""
        item = Item()
        item['title'] = response.meta.get('title')
        item['source_url'] = response.url
        item['publish_date'] = response.meta.get('publish_date')
        # Whole document markup is stored; extraction happens downstream.
        item['content'] = response.xpath(".").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        # Guard against a missing title: '已失效' in None raises TypeError.
        title = item['title'] or ''
        if '已失效' in title or '已废止' in title:
            item['state'] = '0'  # document marked invalid/repealed
        else:
            item['state'] = None
        yield item


if __name__ == "__main__":
    # Allow launching the crawl directly: python <this_file>.py
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "sichuan_ybsxwxrmzfgfxwj"])
