import json
import re

import scrapy

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class Henan_Ayslaqrmzf_C85C_Policy(BaseTaxPolicySpider):
    """Spider for policy documents published by the Longan District
    People's Government (Anyang, Henan).

    Crawl flow:
        start_requests -> parse_page (find the JS-embedded JSON config URL)
        -> parse_page_json (enumerate paginated list pages)
        -> parse_list (follow article links)
        -> parse_detail (build and yield a ``NetTaxPolicyItem``).
    """

    name = "henan_ayslaqrmzf_c85c_policy"
    province: str = "河南省"  # from the source table
    city: str = "安阳市"  # from the source table
    county: str = "龙安区"  # from the source table
    park: str = ""  # from the source table
    source: str = "安阳市龙安区人民政府"  # from the source table; entries with the same source are merged
    url: str = "https://www.longan.gov.cn/sitesources/aylaq/page_pc/zwgk/zfwj/list1.html"  # entry URL, recorded for troubleshooting

    def start_requests(self):
        """Start the crawl at the list entry page."""
        yield scrapy.Request(self.url, callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Extract the ``scriptJsonPath`` pagination-config URL from the page JS."""
        url = response.xpath(".").re_first(r"scriptJsonPath\s*=\s*[\"'](.*?)[\"']")
        if url:
            yield response.follow(url, callback=self.parse_page_json)
        else:
            # Surface layout changes instead of silently producing no output.
            self.logger.warning("scriptJsonPath not found on %s", response.url)

    def parse_page_json(self, response, **kwargs):
        """Enumerate every paginated list page described by the JSON config."""
        data = json.loads(response.text)
        # endPage may be absent/null in the JSON; default to 0 so the loop
        # simply yields nothing instead of raising TypeError in range().
        page_num = data.get("endPage") or 0
        index_url = response.urljoin(data.get("indexUrl"))
        for page in range(1, page_num + 1):
            # Rewrite the trailing "<n>.htm" of the index URL for each page.
            # The dot must be escaped; an unescaped "." would match any char.
            new_url = re.sub(r"\d+\.htm", f"{page}.htm", index_url)
            self.logger.info(f"parse_page_json: {new_url}")
            yield scrapy.Request(new_url, callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Follow every article link inside the list table."""
        for url in response.xpath('//*[@id="articleListTable"]//a/@href').extract():
            yield response.follow(url, callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Populate an item from an article detail page.

        Items without a publish date are dropped. An item passed through
        ``response.meta`` is reused if present.
        """
        item = response.meta.get("item")
        if item is None:
            item = Item()
        item["title"] = response.xpath("string(//meta[@name='ArticleTitle']/@content)").get()
        item["publish_date"] = response.xpath("string(//meta[@name='PubDate']/@content)").get()
        # The full page markup is stored as content — presumably cleaned
        # downstream; TODO confirm against the pipeline.
        item["content"] = response.xpath(".").get()
        item["province"] = self.province
        item["city"] = self.city
        item["county"] = self.county
        item["park"] = self.park
        item["source"] = self.source
        item["source_url"] = response.request.url
        if item["publish_date"]:
            yield item


if __name__ == "__main__":
    # Run the spider directly for local debugging.
    from scrapy import cmdline

    # Reference the spider's own `name` attribute instead of duplicating the
    # string, so the debug command cannot drift out of sync with the class.
    cmdline.execute(["scrapy", "crawl", Henan_Ayslaqrmzf_C85C_Policy.name])
