
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item


class JiangxiJasfzhggwyh6AabPolicy(BaseTaxPolicySpider):
    """Spider for policy documents published by the Ji'an Municipal
    Development and Reform Commission (Jiangxi province).

    The entry page embeds the parameters of the site's ``api-ajax_list``
    endpoint inside an inline <script>; this spider scrapes those codes
    out first, then drives the paginated list API with them.
    """

    name = "jiangxi_jasfzhggwyh_6aab_policy"
    province: str = "江西省"  # from the source registry table
    city: str = "吉安市"  # from the source registry table
    county: str = ""  # from the source registry table
    park: str = ""  # from the source registry table
    source: str = "吉安市发展和改革委员会"  # from the registry table; rows with the same source are merged
    url: str = "http://fgw.jian.gov.cn/xxgk-list-gfxwja.html"  # entry URL, recorded to ease later debugging
    auto_next: bool = True
    custom_settings = {
        "DEFAULT_REQUEST_HEADERS": {
            "Content-Type": "application/x-www-form-urlencoded",
        }
    }

    def start_requests(self):
        """Kick off the crawl by fetching the entry list page."""
        yield self.Request(self.url, callback=self.return_code)

    def return_code(self, response):
        """Pull the site/section codes out of the entry page's inline
        scripts, then request the AJAX list API with them."""
        scripts = response.xpath('//script')
        whole_page = response.xpath('.')

        # Primary pattern: quoted `ajax_type[...]` assignments in a script.
        # Fallback: the codes also appear in a JS array literal elsewhere.
        site = scripts.re_first(r"ajax_type\[1\]': (.*?),")
        if not site:
            site = whole_page.re_first(r"\[\"\d+_xxgk\",[\"]?(\d+)[\"]?")
        section = scripts.re_first(r"ajax_type\[2\]': (.*?),")
        if not section:
            section = whole_page.re_first(r"\[\"(\d+)_xxgk\",")

        payload = f'ajax_type[]={section}_xxgk&ajax_type[]={site}&ajax_type[]={section}&ajax_type[]=xxgk&ajax_type[]=Y-m-d&ajax_type[]=50&ajax_type[]=20&ajax_type[7][]=is_top DESC&ajax_type[7][]=displayorder DESC&ajax_type[7][]=inputtime DESC&ajax_type[]=&is_ds=1'
        yield from self.gen_request_by_api_ajax_list(
            url="http://fgw.jian.gov.cn/api-ajax_list-1.html",
            data=payload,
            list_a_xpath=None,
            end_tail="-1.html",
            callback=self.parse_content,
        )

    def parse_content(self, response, **kwargs):
        """Convert one detail page into a NetTaxPolicyItem."""
        pre_data = self.parse_title_and_publish_time_by_gen(response.text)

        # Prefer the page's own meta tags for the publish date; fall back
        # to whatever the generic title/date parser recovered.
        publish_date = (
            response.xpath('//*[@name="PubDate"]/@content').get()
            or response.xpath('//*[@name="pub_date"]/@content').get()
            or pre_data.get("publish_time")
        )

        yield make_item(
            NetTaxPolicyItem,
            {
                "title": pre_data.get("title"),
                "publish_date": publish_date,
                "content": self.process_content(response.text),
                "source_url": response.request.url,
                "source": self.source,
                "province": self.province,
                "city": self.city,
                "county": self.county,
                "park": self.park,
            },
        )


if __name__ == "__main__":
    # Local debug entry point: launch this spider through Scrapy's CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "jiangxi_jasfzhggwyh_6aab_policy"])