import json

import parsel

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item, urlencode


class Henan_Hbsfzhggwyh_9Add_Policy(BaseTaxPolicySpider):
    """Spider for policy documents published by the Hebi Development and
    Reform Commission (Henan province).

    The entry page embeds a ``querydata`` element whose attributes carry the
    JSON-ish parameters for the backing list API; list pages are fetched from
    that API and each detail page is parsed into a ``NetTaxPolicyItem``.
    """

    name = "henan_hbsfzhggwyh_9add_policy"
    province: str = "河南省"  # from the source spreadsheet
    city: str = "鹤壁市"  # from the source spreadsheet
    county: str = ""  # from the source spreadsheet
    park: str = ""  # from the source spreadsheet
    source: str = "鹤壁市发展和改革委员会"  # from the source spreadsheet; identical sources are merged
    url: str = "https://fgw.hebi.gov.cn/zfxxgk/zc/qtwj/index.html"  # entry URL, kept for troubleshooting
    auto_next: bool = True  # follow pagination automatically

    def start_requests(self):
        """Kick off the crawl at the configured entry page."""
        yield self.Request(url=self.url, callback=self.parse)

    def parse(self, response, **kwargs):
        """Extract the list-API endpoint and its query parameters from the
        entry page's ``querydata`` element, then request the first list page."""
        url = response.urljoin(response.xpath("//*[@querydata]/@url").get())
        # querydata holds a JSON-like payload using single quotes; normalize
        # them to double quotes before parsing.
        params = json.loads(response.xpath("//*[@querydata]/@querydata").get().replace("'", '"'))
        yield self.Request(url=url + "?" + urlencode(params), callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Parse one page of the JSON list API: schedule every linked detail
        page and, when ``auto_next`` is set, the next list page."""
        resp_html = json.loads(response.text)["data"]["html"]
        resp = parsel.Selector(resp_html)
        url_list = resp.xpath("//li//a") or []
        for anchor in url_list:
            href = anchor.xpath("@href").get()
            # Guard against anchors without an href: response.follow(None)
            # would raise and abort the whole page.
            if href:
                yield response.follow(url=href, callback=self.parse_content)
        if not self.auto_next:
            return
        # Evaluate the querydata attribute once instead of twice.
        querydata = resp.xpath("//*[@querydata]/@querydata").get()
        if url_list and querydata:
            params = json.loads(querydata.replace("'", '"'))
            url = response.urljoin(resp.xpath("//*[@querydata]/@uniturl").get())
            page_no = resp.xpath("//*[@querydata]/@pageno").get()
            page_size = resp.xpath("//*[@querydata]/@rows").get()
            # NOTE(review): this condition is effectively always true (a page
            # should never exceed page_size); possibly `==` was intended so a
            # short final page stops pagination — confirm against the site.
            if len(url_list) <= int(page_size):
                # NOTE(review): paramJson is a nested dict — urlencode will
                # stringify it; assumed to match the site's API format. Confirm.
                params.update({"paramJson": {"pageNo": int(page_no) + 1, "pageSize": page_size}})
                yield self.Request(url=url + "?" + urlencode(params), callback=self.parse_list)

    def parse_content(self, response, **kwargs):
        """Build a ``NetTaxPolicyItem`` from a detail page.

        Items without a resolvable publish date are dropped.
        """
        request = response.request
        # Process the page body once; the original computed the identical
        # value twice in a row.
        content = self.process_content(response.text)
        pre_data = self.parse_title_and_publish_time_by_gen(content)
        title = pre_data.get("title")
        # Prefer explicit meta tags for the publish date, falling back to the
        # value inferred from the page body.
        publish_date = (
            response.xpath('//*[@name="PubDate"]/@content').get()
            or response.xpath('//*[@name="pub_date"]/@content').get()
            or pre_data.get("publish_time")
        )

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=request.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        if item["publish_date"]:
            yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    # Launch this spider directly via the Scrapy CLI when run as a script.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "henan_hbsfzhggwyh_9add_policy"])
