import json

import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import datetime
import math


class NeimengguWlcbsfzsrmzf097EPolicy(BaseTaxPolicySpider):
    """Spider for the government-information disclosure section of the
    Fengzhen municipal government site (Wulanchabu, Inner Mongolia).

    The site exposes a JSON list API (``/apiv2/zwgk/contentList``). Each
    article record in the list response already carries its full content,
    so items are built straight from the list pages.
    """

    name = "neimenggu_wlcbsfzsrmzf_097e_policy"

    province: str = "内蒙古自治区"  # taken from the source table
    city: str = "乌兰察布市"  # taken from the source table
    county: str = "丰镇市"  # taken from the source table
    park: str = "None"  # taken from the source table
    source: str = "乌兰察布市丰镇市人民政府"  # taken from the source table; rows with the same source are merged
    url: str = "https://www.fengzhen.gov.cn/zfxxgkzn/code-1-3-4/index.html"  # entry URL, recorded for troubleshooting
    host_url = url.split('.gov.cn')[0] + '.gov.cn'

    def start_requests(self):
        """Start at page 1 of the list API for this menu code."""
        code = self.url.split('/')[-2]  # e.g. "code-1-3-4" segment of the entry URL
        url = f'{self.host_url}/apiv2/zwgk/contentList?page=1&page_size=10&code={code}&orgs_id='
        yield scrapy.Request(url, callback=self.return_content)

    def return_content(self, response, **kwargs):
        """Parse one page of the list API.

        Yields an item for every article on the current page and, on the
        first (page-1) response of a multi-page menu, schedules requests
        for the remaining pages.

        BUGFIX: the original code re-requested page 1 (with a ``need_save``
        meta flag) instead of saving its rows. That re-request has the same
        URL as the response being parsed, so Scrapy's default duplicate
        filter dropped it and page 1's articles were silently lost whenever
        ``total > 10``. The current page is now always saved and only pages
        2..last_page are scheduled; the ``need_save`` flag is kept so
        scheduled pages never re-trigger pagination.
        """
        datas = response.json()['data']
        need_save = response.meta.get('need_save')
        for data in datas:
            code = data['menu_code']
            menu_data = data['menu_data']
            if not menu_data:
                continue
            if menu_data['total'] > 0:
                for detail in menu_data['data']:
                    yield self._make_item(detail, response.url)
            if menu_data['total'] > 10 and not need_save:
                # Page 1 was just handled above; fetch only the rest.
                for num in range(2, menu_data['last_page'] + 1):
                    url = f'{self.host_url}/apiv2/zwgk/contentList?page={num}&page_size=10&code={code}&orgs_id='
                    yield response.follow(url, callback=self.return_content, meta={"need_save": True})

    def _make_item(self, detail, source_url):
        """Build a policy item from one article record of the list API.

        ``detail`` is one element of ``menu_data['data']`` from the API
        response; ``source_url`` is the URL the record was fetched from.
        """
        item = Item()
        item['title'] = detail.get('title')
        # NOTE(review): this is the list-API URL, not a per-article page
        # URL — confirm that is the intended provenance value.
        item['source_url'] = source_url
        # 'pulish_time_text' is the API's own (misspelled) field name — do
        # not "correct" the key.
        item['publish_date'] = detail.get('pulish_time_text')
        item['content'] = detail.get('content')
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        return item

    def parse_detail(self, response, **kwargs):
        """Apparently unused callback, kept for interface compatibility.

        NOTE(review): no request in this spider targets this callback, and
        scrapy Response objects have no ``.get`` method, so invoking it
        would raise AttributeError exactly as the original did. Behavior is
        preserved by routing through ``_make_item`` (same ``.get`` calls).
        """
        yield self._make_item(response, response.url)


if __name__ == "__main__":
    # Local debugging entry point: hand off to Scrapy's CLI to run this spider.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "neimenggu_wlcbsfzsrmzf_097e_policy"])
