import re
from urllib.parse import urlsplit

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item


class {{ function_name  }}(BaseTaxPolicySpider):
    name = "{{ name }}"
    province: str = "{{ province }}"  # 取表格
    city: str = "{{ city }}"  # 取表格
    county: str = "{{ county }}"  # 取表格
    park: str = "{{ park }}"  # 取表格
    source: str = "{{ source }}"  # 取表格 同一个来源合并
    url: str = "{{ url }}"  # 注明入口网址，以便后续排错
    auto_next: bool = True
    page_size: int = 18

    def __init__(self, **kwargs):
        kwargs = kwargs or {}
        kwargs.update(**dict(
            DOWNLOADER="tools.jsl.downloader.JSLRequestsDownloader",
        ))
        super().__init__(**kwargs)

    def start_requests(self):
        yield self.Request(self.url, callback=self._parser_cms_html)

    def _parser_cms_html(self, response, **kwargs):
        total_record = response.re_first(r"totalRecord:\s*(\d+),")
        site_id = response.re_first(r"columnId:'(.*?)'")
        for a in response.xpath('//*[@id="initData"]//a'):
            host = re.findall(r"(http://.*?)/", response.url)
            url = a.xpath("./@onclick").re_first(r"\([']*(.*?)[']*\)")
            if url and not url.startswith('http'):
                url = host[0] + a.xpath("./@onclick").re_first(r"\([']*(.*?)[']*\)")
            if url and not url.endswith(".pdf"):
                yield self.Request(url, callback=self.parse_content)

        if site_id and total_record:
            host = response.url.split("//")[1].split("/")[0]
            protocol = response.url.split("//")[0]
            url = (
                    protocol + '//' + host + "/TrueCMS/messageController/getMessage.do"
            )
            params = {
                "callback": "",
                "startrecord": "1",
                "endrecord": "45",
                "perpage": "15",
                "contentTemplate": "",
                "columnId": f"{site_id}",
            }
            if int(total_record) > 45:
                yield from self.gen_request_by_cms(
                    url, params, callback=self.parse_content, refer=self.url, auto_next=self.auto_next
                )

    def parse_content(self, response, **kwargs):
        request = response.request
        pre_data = self.parse_title_and_publish_time_by_gen(response.text)
        title = pre_data.get("title")
        publish_date = (
                response.xpath('//*[@name="PubDate"]/@content').get()
                or response.xpath('//*[@name="pub_date"]/@content').get()
                or pre_data.get("publish_time")
        )
        content = response.text
        content = self.process_content(content)

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=request.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    from scrapy import cmdline

    # Pass argv as a list: str.split() would break a spider name that
    # happens to contain whitespace once the template is rendered.
    cmdline.execute(["scrapy", "crawl", "{{ name }}"])

