import time
import re
import json
import requests
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for policy announcements published by the Daqing municipal
    government (大庆市人民政府).

    Flow: probe page 1 of the site's JSON search API to learn the total
    result count, then schedule one request per page; each page's result
    entries carry the article URL and metadata, and the article page itself
    is fetched to capture its full HTML as `content`.
    """

    name = "heilongjiang_dqsrmzfqt"

    province: str = "黑龙江省"  # 取表格
    city: str = "大庆市"  # 取表格
    county: str = ""  # 取表格
    park: str = "None"  # 取表格
    source: str = "大庆市人民政府"  # 取表格 同一个来源合并
    url: str = "https://www.daqing.gov.cn/daqing/c100375/zfxxgk_list.shtml"  # entry URL, kept for troubleshooting

    # JSON search API endpoint; {page} is the 1-based page number.
    # Previously this literal was duplicated in start_requests.
    API_URL = (
        "https://www.daqing.gov.cn/common/search/558411e790de456192a2be8f052b7382"
        "?page={page}&_pageSize=15&_isAgg=true&_isJson=true&_template=index"
        "&_rangeTimeGte=&_channelName="
    )

    # Browser-like headers required by the API. Previously this dict was
    # copy-pasted verbatim in three separate methods; defined once here.
    HEADERS = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Referer': 'https://www.daqing.gov.cn/daqing/c100375/zfxxgk_list.shtml',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"'
    }

    def start_requests(self):
        """Probe the API for the total count, then yield one request per page."""
        # Blocking probe (requests, not scrapy) of page 1 to learn pagination.
        probe = requests.get(self.API_URL.format(page=1), headers=self.HEADERS)
        data = probe.json()['data']  # parse the body once, not twice
        total_num = data['total']
        # 'rows' is treated as the per-page size by the ceil-division below
        # -- assumed numeric; TODO confirm against a live API response.
        rows = data['rows']
        pages = int(math.ceil(float(total_num) / float(rows)))
        print("总页数：", pages, "总条数：", total_num)
        for page in range(1, pages + 1):
            yield scrapy.Request(
                url=self.API_URL.format(page=page),
                method='GET',
                headers=self.HEADERS,
                callback=self.detail_requests,
            )

    def detail_requests(self, response, **kwargs):
        """Parse one API result page and request each listed article page.

        Metadata from the listing (title, publish date, listing HTML) rides
        along in request meta so parse_detail can emit it with the item.
        """
        for entry in json.loads(response.text)['data']['results']:
            meta = {
                'url': entry['url'],
                'title': entry['title'],
                'publish_date': entry['publishedTimeStr'],
                'content': entry['contentHtml'],
            }
            yield scrapy.Request(
                url=entry['url'],
                method='GET',
                headers=self.HEADERS,
                callback=self.parse_detail,
                meta=meta,
            )

    def parse_list(self, response, **kwargs):
        # Required by the base-class interface; pagination is handled entirely
        # in start_requests/detail_requests, so nothing to do here.
        pass

    def parse_detail(self, response=None, **kwargs):
        """Build and yield one item from an article page plus its listing meta.

        Note: content is taken from the fetched page's full document node,
        not from the listing's 'content' meta value (which is left unused).
        """
        item = Item()
        item['title'] = response.meta.get('title')
        item['source_url'] = response.meta.get('url')
        item['publish_date'] = response.meta.get('publish_date')
        item['content'] = response.xpath(".").get()  # entire document HTML
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Reference spider.name instead of re-hard-coding the crawler name, so the
    # two cannot drift apart if the name is ever changed.
    cmdline.execute(["scrapy", "crawl", spider.name])
