
import json
import re
from tax_policy.base.base_tax_policy_spider import BaseTaxPolicySpider
from tax_policy.items import NetTaxPolicyItem


class {{ function_name  }}(BaseTaxPolicySpider):
    name = "{{ name }}"
    province: str = "{{ province }}"  # 取表格
    city: str = "{{ city }}"  # 取表格
    county: str = "{{ county }}"  # 取表格
    park: str = "{{ park }}"  # 取表格
    source: str = "{{ source }}"  # 取表格 同一个来源合并
    url: str = "{{ url }}"  # 注明入口网址，以便后续排错
    auto_next: bool = True

    def start_requests(self):
        yield self.Request(self.url, callback=self.parse)

    def parse(self, response, **kwargs):
        resp = json.loads(re.findall(r"({.*})\);", response.text)[0])
        page = resp.get("page", {}).get("totalPages")
        pageNumber = resp.get("page", {}).get("pageNumber")
        print(pageNumber)
        datas = resp.get("page", {}).get("content", [])
        for data in datas:
            yield self.Request(data.get("DOCPUBURL"), callback=self.parse_content)
        if pageNumber < page:
            next_page_url = self.url.replace("pageNumber=1", f"pageNumber={pageNumber + 1}")
            yield self.Request(next_page_url, callback=self.parse)

    def parse_content(self, response, **kwargs):
        request = response.request
        pre_data = self.parse_title_and_publish_time_by_gen(response.text)
        title = pre_data.get("title")
        publish_date = (
            response.xpath('//*[@name="PubDate"]/@content').get()
            or response.xpath('//*[@name="pub_date"]/@content').get()
            or pre_data.get("publish_time")
        )
        content = response.text
        content = self.process_content(content)

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=request.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    from scrapy import cmdline

    # Run this spider directly when the module is executed as a script.
    command = "scrapy crawl {{ name }}"
    cmdline.execute(command.split())

