# -*- coding: utf-8 -*-
# @Time    : 2023/11/26 21:45
# @Author  : wkaanig
# @File    : fj_fzglrmzf_policy
# @Email   : 15670813638@163.com
# @Software: PyCharm
import re





from tax_policy.base.base_tax_policy_spider import BaseTaxPolicySpider
from tax_policy.items import NetTaxPolicyItem


class {{ function_name  }}(BaseTaxPolicySpider):
    name = "{{ name }}"
    province: str = "{{ province }}"  # 取表格
    city: str = "{{ city }}"  # 取表格
    county: str = "{{ county }}"  # 取表格
    park: str = "{{ park }}"  # 取表格
    source: str = "{{ source }}"  # 取表格 同一个来源合并
    url: str = "{{ url }}"  # 注明入口网址，以便后续排错
    auto_next: bool = True

    def start_requests(self):
        yield feapder.Request(self.url, method="GET")

    def parse(self, response, **kwargs):
        if "list2.trsapi" in response.text:
            host = '/'.join(self.url.split("/")[:3])
            trs_api_url = "/fjdzapp/search"
            trsapi_url = re.findall(r"""trsapi_url\s*:\s*['"](.*?)["']""", response.text)
            if trsapi_url:
                trs_api_url = trsapi_url[0]
            url = host + trs_api_url
            class_sql_2 = re.findall(
                r"""var classsql\s*=.{0,100}?\?\s*['"](.*?)["']\s*:\s*['"](.*?)["']""", response.text,
                re.S
            )
            class_sql_2 = class_sql_2 or re.findall(r"""classsql:\s*['"](.*?)["']\s*\|\|\s*['"](.*?)["']""", response.text)
            class_sql_2 = class_sql_2 or re.findall(r"""classsql\s*=\s*['"](.*?)["'](.*?);""", response.text)
            if class_sql_2:
                for class_sql in class_sql_2[0]:
                    if class_sql:
                        data = {
                            "channelid": "229105",
                            "sortfield": "-docreltime",
                            "classsql": class_sql,
                            "classcol": "publishyear",
                            "classnum": "100",
                            "classsort": "0",
                            "cache": "true",
                            "page": "1",
                            "prepage": "500"
                        }
                        yield feapder.Request(url, method="POST", data=data, callback=self.parse_list)
        else:
            print("遇到了无法解析的class_sql")

    def download_midware(self, request):
        request.headers = {"Referer": self.url}
        request.proxies = get_company_ip_crawler()
        return request

    def parse_list(self, response, **kwargs):
        datas = response.json["data"]
        for data in datas:
            url = data.get("chnldocurl")
            if url:
                yield feapder.Request(url, method="GET", callback=self.parse_content)

    def parse_content(self, response, **kwargs):
        request = response.request
        # 使用自动解析工具拿到标题发布链接即可
        # 取不到的需写补充逻辑
        pre_data = self.parse_title_and_publish_time_by_gen(response.text)
        title = pre_data.get("title")
        publish_date = (
                response.xpath('//*[@name="PubDate"]/@content').get()
                or response.xpath('//*[@name="pub_date"]/@content').get()
                or pre_data.get("publish_time")
        )
        publish_date = publish_date[:10]
        publish_no = response.xpath('string(//*[@class="fwzh"]/span)').get()
        content = response.text
        content = self.process_content(content)

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=request.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
            publish_no=publish_no,
        )
        if publish_date:
            yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    from scrapy import cmdline

    # NOTE(review): this spider subclasses a feapder-style BaseTaxPolicySpider
    # (it yields feapder.Request) yet is launched here via scrapy's cmdline —
    # confirm that "scrapy crawl" can actually discover and run it; feapder
    # spiders are normally started with SpiderClass().start().
    cmdline.execute("scrapy crawl {{ name }}".split())

