#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import json
from urllib.parse import urljoin

import execjs
import scrapy

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class Henan_Bfxrmzf_A2C8_Policy(BaseTaxPolicySpider):
    """Spider for tax-policy articles of the Baofeng County People's
    Government (宝丰县人民政府).

    Flow: fetch the entry page -> extract the embedded JS ``data`` object
    -> POST it to the Pingdingshan list API -> yield one request per list
    entry -> parse each detail page into a ``NetTaxPolicyItem``.
    """

    name = "henan_bfxrmzf_a2c8_policy"
    province: str = "河南省"  # from config sheet
    city: str = "平顶山市"  # from config sheet
    county: str = "宝丰县"  # from config sheet
    park: str = ""  # from config sheet
    source: str = "宝丰县人民政府"  # from config sheet; identical sources are merged
    url: str = "http://www.baofeng.gov.cn/channels/32564.html"  # entry URL, recorded to ease troubleshooting
    auto_next: bool = True

    def start_requests(self):
        # Reuse the class-level entry URL instead of duplicating the literal,
        # so the URL only has to be updated in one place.
        yield scrapy.Request(self.url, callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Extract the JS ``data`` payload embedded in the entry page and
        POST it to the list API, forwarding the payload via request meta."""
        data = response.xpath('.').re_first(r"""parms=opt/dynamic/\"[\w\W]{0,20}data\s*=\s*({[\w\W]*?})""")
        # NOTE(security): execjs.eval executes JavaScript scraped from a
        # remote page; the payload is attacker-controllable if the site is
        # compromised. Kept as-is because the object is not plain JSON.
        data = execjs.eval(data)
        data = {k: str(v) for k, v in data.items()}
        url = "https://user.pds.gov.cn/api/Outer/T?parms=opt/dynamic/"
        yield scrapy.Request(url, method="POST", body=json.dumps(data), callback=self.parse_list, meta={'data': data})

    def parse_list(self, response, **kwargs):
        """Yield one detail-page request per list entry; on the first pass
        (``is_next`` unset) also fan out one request per remaining page."""
        prev_item = response.meta.get('item')
        for elem in response.xpath("//li"):
            item = Item()
            item['source_url'] = elem.xpath(""".//a/@href""").re_first(r"""(.*/.*)""")
            item['source_url'] = urljoin(self.url, item['source_url'])
            item['publish_date'] = elem.xpath("""string(./span)""").get()
            if prev_item is not None:
                # Carry over fields captured on an earlier page. NOTE: this
                # deliberately overwrites source_url/publish_date when
                # prev_item contains them (generated-template behavior).
                for key, value in prev_item.items():
                    item[key] = value
            if not item['source_url']:
                continue
            yield response.follow(item['source_url'], callback=self.parse_detail, meta={'item': item})

        if response.meta.get("is_next") is not False:
            data = response.request.meta.get('data')
            total_page = response.xpath(""".""").re_first(r"""<option[^/]*?>(\d+)</option>[\W\w]{0,10}/select>""")
            # Guard: re_first() returns None when the pager markup is absent;
            # the previous code would then crash on int(None) and pagination
            # was lost anyway, so skip cleanly instead.
            if total_page:
                # Was `int(total_page) // 1` — the `// 1` was a no-op and
                # has been dropped; the iteration range is unchanged.
                for page_num in range(1, int(total_page)):
                    yield scrapy.FormRequest(response.url, method="POST", body=json.dumps({**data, **{"pageNum": str(page_num)}}), callback=self.parse_list, meta={'item': prev_item, 'is_next': False})

    def parse_detail(self, response, **kwargs):
        """Fill the item from the detail page and yield it.

        Title/publish date prefer the page's <meta> tags and fall back to
        values derived from the processed page content; items without a
        publish date are dropped.
        """
        item = Item() if response.meta.get("item") is None else response.meta.get("item")

        content = response.text
        content = self.process_content(content)
        pre_data = self.parse_title_and_publish_time_by_gen(content)
        title = pre_data.get("title")
        # Fallback chain for the publish date: meta tags first, then the
        # value extracted from the page content by the base-class helper.
        publish_date = (
            response.xpath('//*[@name="PubDate"]/@content').get()
            or response.xpath('//*[@name="pub_date"]/@content').get()
            or pre_data.get("publish_time")
        )
        # string() of an empty node-set yields "" (falsy), so the `or`
        # fallbacks below fire when the <meta> tags are missing.
        item["title"] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get() or title
        item["publish_date"] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get() or publish_date
        item["content"] = content
        item["province"] = self.province
        item["city"] = self.city
        item["county"] = self.county
        item["park"] = self.park
        item["source"] = self.source
        item["source_url"] = response.request.url
        if item["publish_date"]:
            yield item


if __name__ == "__main__":
    # Convenience entry point: launch this spider directly through
    # Scrapy's command-line runner.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "henan_bfxrmzf_a2c8_policy"])
