#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/4/1 14:39
# @Author  : 王凯
# @Project : scrapy_spider
import json
import re

import scrapy

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import urlencode, urldecode, parse_url_params


class henan_jzsjfqrmzf_96e3_policy(BaseTaxPolicySpider):
    """Spider for policy notices of the Jiefang District People's
    Government, Jiaozuo, Henan (www.jfq.gov.cn).

    Crawls two list sections (id=265 and id=257/cid=24), walks their
    pagination, follows every detail link, and yields one
    ``NetTaxPolicyItem`` per detail page that has a publish date.
    """

    name = "henan_jzsjfqrmzf_96e3_policy"
    province: str = "河南省"  # from the source spreadsheet
    city: str = "焦作市"  # from the source spreadsheet
    county: str = "解放区"  # from the source spreadsheet
    park: str = ""  # from the source spreadsheet
    source: str = "焦作市解放区人民政府"  # from the spreadsheet; identical sources are merged downstream
    url: str = "https://www.jfq.gov.cn/xxgkpt/list?id=265&cid=265"  # entry URL kept for later debugging

    def start_requests(self):
        """Seed the crawl with page 1 of both list sections."""
        url = 'https://www.jfq.gov.cn/xxgkpt/list?%2Fxxgkpt%2Flist=&id=265&cid=265&page=1'
        yield scrapy.Request(url, callback=self.parse_page)
        url = 'https://www.jfq.gov.cn/xxgkpt/list?%2Fxxgkpt%2Flist=&id=257&cid=24&page=1'
        yield scrapy.Request(url, callback=self.parse_page)

    def parse_page(self, response, **kwargs):
        """Parse the first list page, then schedule pages 2..N.

        The last-page number is read from the second-to-last pagination
        anchor. Guard against a missing pager or non-numeric anchor text
        so a single odd listing page cannot abort the whole crawl with
        IndexError/ValueError (the original indexed ``[-2]`` and called
        ``int()`` unconditionally).
        """
        yield from self.parse_list(response, **kwargs)
        pagers = response.xpath("//li/a[contains(@href, 'page=')]")
        if len(pagers) < 2:
            return  # no pagination block -> single-page section
        last_text = (pagers[-2].xpath("./text()").get() or "").strip()
        if not last_text.isdigit():
            return  # unexpected pager layout; skip pagination rather than crash
        page_num = int(last_text)
        # parse_url_params is loop-invariant: compute once, reuse per page.
        root_url, params = parse_url_params(response.request.url)
        for page in range(2, page_num + 1):
            params["page"] = page
            new_url = root_url + "?" + urlencode(params)
            yield scrapy.Request(new_url, callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Follow every detail link in the list container."""
        for url in response.xpath('//*[@class="zfxxgk_zd2"]//a/@href').extract():
            yield response.follow(url, callback=self.parse_detail)

    def parse_detail(self, response, **kwargs):
        """Build one item from a detail page.

        The publish date is taken from the ``PubDate``/``pub_date`` meta
        tags first, falling back to the generic title/date parser.
        Items without a resolvable publish date are dropped.
        """
        item = response.meta.get("item")
        if item is None:
            item = Item()
        # NOTE: the original called process_content twice (before and
        # after date extraction); one pass is sufficient and safer if the
        # helper is not idempotent.
        content = self.process_content(response.text)
        pre_data = self.parse_title_and_publish_time_by_gen(content)
        publish_date = (
                response.xpath('//*[@name="PubDate"]/@content').get()
                or response.xpath('//*[@name="pub_date"]/@content').get()
                or pre_data.get("publish_time")
        )
        item["title"] = response.xpath("string(//title)").get()
        item["publish_date"] = publish_date
        item["content"] = content
        item["province"] = self.province
        item["city"] = self.city
        item["county"] = self.county
        item["park"] = self.park
        item["source"] = self.source
        item["source_url"] = response.request.url
        if item["publish_date"]:
            yield item


if __name__ == "__main__":
    # Allow running this spider directly (equivalent to `scrapy crawl ...`).
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "henan_jzsjfqrmzf_96e3_policy"])
