import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for tax-policy notices published by the Yingkou Finance Bureau.

    The site exposes its article list through a JSON POST endpoint; each
    list entry is then followed to its detail page, where the item fields
    are assembled from the page plus metadata carried over via ``meta``.
    """

    name = "yingkou_yksczj"

    province: str = "辽宁省"  # from the source table
    city: str = "营口市"  # from the source table
    county: str = ""  # from the source table
    park: str = "None"  # from the source table
    source: str = "营口市财政局"  # from the source table; same-source entries are merged
    # Entry URL, recorded so the crawl can be re-verified / debugged later.
    url: str = "http://czj.yingkou.gov.cn/govpub/govGuidePub.html?categorynum=007&lm=cate7"

    def start_requests(self):
        """Kick off the crawl by POSTing a JSON query to the list endpoint."""
        api_url = "https://czj.yingkou.gov.cn/EWB_YK_Mid/rest/lightfrontaction/getgovinfolist"

        headers = {
            'Content-Type': 'application/json'
        }

        # Query payload expected by the endpoint; pageIndex is zero-based.
        payload = {
            "token": "",
            "params": {
                "deptcode": "",
                "categorynum": "007",
                "pageIndex": 0,
                "pageSize": 15,
                "siteGuid": "ff81900c-7f9d-486a-b015-4fa6b8fa031e",
            },
        }

        yield scrapy.FormRequest(
            url=api_url,
            method='POST',
            body=json.dumps(payload),
            headers=headers,
            callback=self.detail_requests,
        )

    def detail_requests(self, response, **kwargs):
        """Parse the JSON list response and schedule one detail request per entry.

        Fix: the response body is now parsed once (it was previously run
        through ``json.loads`` twice), the leftover debug ``print`` is gone,
        and absolute URLs are detected with ``startswith`` instead of a
        substring test (a relative path merely *containing* "http" would
        have been mistaken for an absolute link).
        """
        body = json.loads(response.text)
        data_list = body['custom']['data']
        # body['custom']['total'] holds the total record count. The page
        # currently fits in a single 15-item response, so pagination is
        # intentionally not implemented; if it becomes necessary, repeat the
        # start_requests POST with pageIndex = 1..ceil(total/15)-1 and pass
        # meta={'is_next': False} to avoid re-triggering pagination.
        for data in data_list:
            info_url = data['infourl']
            # Relative links lack a scheme; prefix the site host.
            if not info_url.startswith('http'):
                link = "http://czj.yingkou.gov.cn" + info_url
            else:
                link = info_url
            yield scrapy.Request(
                url=link,
                callback=self.parse_detail,
                meta={'pub_date': data['infodate'], 'tit': data['title']},
            )

    def parse_list(self, response, **kwargs):
        """Unused: listing is handled by detail_requests (JSON endpoint)."""
        pass

    def parse_detail(self, response, **kwargs):
        """Build and yield one item from a detail page.

        Title and publish date come from the list response via ``meta``;
        location fields come from the class-level constants.
        """
        item = Item()
        item['title'] = response.meta.get('tit')
        item['source_url'] = response.url
        item['publish_date'] = response.meta.get('pub_date')
        # NOTE(review): xpath(".") selects the whole document node, so
        # 'content' is the full page HTML. If only the article body is
        # wanted, this selector should be narrowed — confirm intent.
        item['content'] = response.xpath(""".""").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Allow running this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "yingkou_yksczj"])
