import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Crawl normative documents (行政规范性文件) from the Ganjingzi District,
    Dalian government site (大连市甘井子区人民政府).

    The site is a single-page app; the HTML entry ``url`` is recorded only for
    reference/debugging — all data comes from the JSON POST endpoints under
    ``/external/affairs/``.
    """

    name = "dalian_dlsgjzqrmzf"

    province: str = "辽宁省"  # fills the item table
    city: str = "大连市"  # fills the item table
    county: str = "甘井子区"  # fills the item table
    park: str = "None"  # fills the item table
    source: str = "大连市甘井子区人民政府"  # fills the item table; items from the same source are merged
    url: str = "https://www.dlgjz.gov.cn/#/government-info-open?index=1&year=2023"  # entry URL, kept for later debugging

    # JSON API endpoint returning the paginated document list.
    LIST_URL = 'https://www.dlgjz.gov.cn/external/affairs/getNormativeDocumentList'
    # Page size requested from the list API (matches the site default).
    PAGE_SIZE = 20

    def _list_request(self, page: int) -> scrapy.Request:
        """Build the POST request for one page of the document list."""
        payload = {
            "pagination": {
                "current": page,
                "pagesize": self.PAGE_SIZE,
                "total": 0
            },
            "query": {
                "label": "",
                "state": "1"
            }
        }
        return scrapy.Request(
            url=self.LIST_URL,
            method='POST',
            body=json.dumps(payload),
            headers={'Content-Type': 'application/json'},
            callback=self.detail_requests,
            meta={'page': page},
        )

    def start_requests(self):
        """Kick off the crawl with the first page of the list API."""
        yield self._list_request(1)

    def detail_requests(self, response, **kwargs):
        """Parse one list page: build an item per document, request its detail
        endpoint, and follow pagination when more pages remain.
        """
        # Parse the response body once instead of three separate json.loads calls.
        data = json.loads(response.text)
        notice_lis = data['datas']
        pagination = data['pagination']
        total_num = pagination['total']
        pagesize = pagination['pagesize']
        self.logger.info('政策条数%s，页面大小%s', total_num, pagesize)

        for notice_li in notice_lis:
            notice_id = notice_li['id']
            item = Item()
            item['title'] = notice_li['label']
            item['source_url'] = f"https://www.dlgjz.gov.cn/#/articleGuifan?currentLocation=%E8%A1%8C%E6%94%BF%E8%A7%84%E8%8C%83%E6%80%A7%E6%96%87%E4%BB%B6&id={notice_id}&questionnaireId={notice_id}"
            item['publish_date'] = notice_li['releaseDate']
            item['content'] = notice_li['body']
            item['source'] = self.source
            item['province'] = self.province
            item['city'] = self.city
            item['county'] = self.county
            item['park'] = self.park
            detail_url = f"https://www.dlgjz.gov.cn/external/affairs/getNormativeDocumentInfo/{notice_id}"
            yield scrapy.Request(url=detail_url, method='POST', callback=self.parse_detail, meta={'item': item})

        # Follow pagination instead of crashing: the original code did
        # `raise ("翻页了")`, which raises a plain string and is itself a
        # TypeError in Python 3. Request the next page while items remain.
        page = response.meta.get('page', 1)
        if page * pagesize < total_num:
            yield self._list_request(page + 1)

    def parse_list(self, response, **kwargs):
        # Required by the BaseTaxPolicySpider interface; list handling is done
        # in detail_requests instead. -- TODO confirm base-class contract
        pass

    def parse_detail(self, response, **kwargs):
        """Emit the item carried over from the list page.

        The item's content already comes from the list API's 'body' field;
        the detail request mainly confirms the document endpoint responds.
        """
        item = response.meta.get('item')
        yield item


if __name__ == "__main__":
    # Allow running this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "dalian_dlsgjzqrmzf"])
