#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/10/10 13:50
# @Author  : 王凯
# @File    : beijing_rmzf_policy.py
# @Project : scrapy_spider
from typing import Any

from scrapy.http import Response

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item


class BeijingRmzfPolicy(BaseTaxPolicySpider):
    """Spider for tax-policy articles published on the Beijing Municipal
    People's Government website (www.beijing.gov.cn).

    List pages are crawled from ``url``; each article link is followed and
    parsed into a ``NetTaxPolicyItem``. Items with no resolvable publish
    date are dropped.
    """

    name = "beijing_rmzf_policy"
    province: str = "北京市"  # from the registry table
    city: str = "北京市"  # from the registry table
    county: str = ""  # from the registry table
    park: str = ""  # from the registry table
    source: str = "北京市人民政府"  # from the registry table; identical sources are merged downstream
    url: str = "https://www.beijing.gov.cn/zhengce/zhengcefagui/index.html"  # entry URL, recorded for troubleshooting
    auto_next: bool = True

    # Substrings marking links to binary attachments rather than HTML
    # articles; such links are skipped in parse().
    SKIP_EXTENSIONS: tuple = (".doc", ".pdf", ".xls")

    def start_requests(self):
        """Start crawling from the policy list entry page."""
        yield self.Request(url=self.url, callback=self.parse)

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Extract article links from a list page and follow each one.

        Links pointing at binary documents (.doc/.pdf/.xls) are skipped
        because parse_content() can only handle HTML pages.
        """
        for anchor in response.xpath('//*[@class="default_news"]//a'):
            href = anchor.xpath("./@href").get()
            # Guard clause: skip empty hrefs and binary-attachment links.
            if not href or any(ext in href for ext in self.SKIP_EXTENSIONS):
                continue
            yield response.follow(url=href, callback=self.parse_content)

    def parse_content(self, response, **kwargs):
        """Parse one policy article page into a ``NetTaxPolicyItem``.

        The publish date is taken from the page meta tags when present,
        falling back to the generic title/date parser. Items without any
        publish date are not yielded.
        """
        # Process the page body once (the original computed this twice).
        content = self.process_content(response.text)
        # Generic fallback parser for pages lacking the meta tags below.
        pre_data = self.parse_title_and_publish_time_by_gen(content)
        title = response.xpath('//*[@name="ArticleTitle"]/@content').get()
        # BUG FIX: the original sliced None[:10] (TypeError) when every
        # date source was missing — default to "" before slicing so such
        # items fall through to the publish_date check and are dropped.
        publish_date = (
            response.xpath('//*[@name="PubDate"]/@content').get()
            or response.xpath('//*[@name="pub_date"]/@content').get()
            or pre_data.get("publish_time")
            or ""
        )[:10]

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=response.request.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        # Downstream requires a publish date; drop items without one.
        if item["publish_date"]:
            yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    # Convenience entry point: launch this spider directly during development.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "beijing_rmzf_policy"])
