from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item
from utils.tools import urlencode, urldecode, parse_url_params


class HenanJzswxrmzf25C8Policy(BaseTaxPolicySpider):
    """Spider for policy documents published by the Wen County (温县)
    People's Government, Jiaozuo City, Henan Province.

    Flow: ``start_requests`` -> ``parse_page_num`` (reads the catalog id
    off the HTML entry page) -> ``parse_page`` (first JSON page; schedules
    the remaining pages exactly once) -> ``parse_list`` (one request per
    article row) -> ``parse_content`` (builds the item).
    """

    name = "henan_jzswxrmzf_25c8_policy"
    province: str = "河南省"  # from the source table
    city: str = "焦作市"  # from the source table
    county: str = "温县"  # from the source table
    park: str = ""  # from the source table
    source: str = "焦作市温县人民政府"  # from the source table; identical sources are merged downstream
    url: str = "http://www.wenxian.gov.cn/sitesources/wxdzw/page_pc/zwgk/zfwj/wzt/list1.html"  # entry URL, kept for later troubleshooting
    auto_next: bool = True

    # JSON list endpoint and its fixed page size ("limit" form field).
    LIST_API: str = "http://www.wenxian.gov.cn/front/xxgk/list"
    PAGE_SIZE: int = 10

    def start_requests(self):
        """Kick off the crawl at the HTML entry page."""
        yield self.Request(self.url, callback=self.parse_page_num)

    def parse_page_num(self, response, **kwargs):
        """Extract the catalog id from the entry page and request the
        first JSON page (offset 0) of the article list."""
        data = {
            "article.aYear": "",
            "article.aNo": "",
            "article.title": "",
            "catalog.id": response.xpath('//input[@name="catalog.id"]/@value').get(),
            "limit": str(self.PAGE_SIZE),
            "offset": "0",
            "sort": "pubtime",
            "order": "desc",
        }
        yield self.FormRequest(self.LIST_API, formdata=data, callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Handle the first JSON page: yield its articles, then schedule
        every remaining page exactly once.

        Fixes vs. the original implementation:
        * page count used ``total // 10 + 1`` with an inclusive range,
          always requesting at least one offset past the last page;
          ``range(PAGE_SIZE, total, PAGE_SIZE)`` yields exactly the
          remaining offsets.
        * follow-up pages previously re-entered ``parse_page``, so each
          page response re-scheduled all pages (only the dupefilter kept
          this bounded); they now go straight to ``parse_list``.
        """
        yield from self.parse_list(response, **kwargs)
        total = int(response.json()["total"])
        # The page-1 request body already carries all form fields; reuse
        # it and only bump the offset. Parsing it is loop-invariant.
        _, base_params = parse_url_params(response.request.body.decode())
        for offset in range(self.PAGE_SIZE, total, self.PAGE_SIZE):
            # Fresh dict per request so each FormRequest gets its own data.
            params = {**base_params, "offset": str(offset)}
            yield self.FormRequest(self.LIST_API, formdata=params, callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Yield one content request per article row of a JSON list page."""
        for row in response.json()["rows"]:
            yield response.follow(row["htmlpath"], callback=self.parse_content)

    def parse_content(self, response, **kwargs):
        """Build a ``NetTaxPolicyItem`` from an article page.

        Publish date is taken from the ``PubDate``/``pub_date`` meta tags
        first, falling back to the base-class generic extractor. Items
        without a publish date are deliberately dropped.
        """
        pre_data = self.parse_title_and_publish_time_by_gen(response.text)
        title = pre_data.get("title")
        publish_date = (
                response.xpath('//*[@name="PubDate"]/@content').get()
                or response.xpath('//*[@name="pub_date"]/@content').get()
                or pre_data.get("publish_time")
        )
        content = self.process_content(response.text)

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=response.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        if item["publish_date"]:
            yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    from scrapy import cmdline

    cmdline.execute("scrapy crawl henan_jzswxrmzf_25c8_policy".split())
