import urllib.parse

from parsel import Selector

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
from utils.tools import make_item


class JiangxiJasafxrmzfD166Policy(BaseTaxPolicySpider):
    """Spider for tax-policy articles from the Anfu County People's Government
    (Ji'an, Jiangxi).

    The column index page embeds a "dataproxy" pagination URL inside a CDATA
    ``<nextgroup>`` element; list pages are fetched through that endpoint and
    each ``<record>`` carries an HTML fragment with the article link.
    """

    name = "jiangxi_jasafxrmzf_d166_policy"
    province: str = "江西省"  # from the region table
    city: str = "吉安市"  # from the region table
    county: str = "安福县"  # from the region table
    park: str = ""  # from the region table
    source: str = "吉安市安福县人民政府"  # from the table; identical sources are merged downstream
    url: str = "http://www.afx.gov.cn/col/col73569/index.html"  # entry URL, kept for troubleshooting
    auto_next: bool = True  # follow pagination automatically
    page_size: int = 30

    def start_requests(self):
        """Start crawling from the column index page."""
        yield self.Request(self.url, callback=self._parser_dataproxy_html)

    def _parser_dataproxy_html(self, response):
        """Extract the dataproxy pagination URL from the index page and kick
        off the paginated list requests.

        The pagination URL lives in a CDATA block inside ``<nextgroup>``;
        its query string becomes the POST/GET data for the dataproxy helper.
        """
        next_url = response.xpath('.').re_first(
            r"""\[CDATA\[<a href="(.*?)"></a>\]\]></nextgroup>"""
        )
        if not next_url:
            # Fail loudly with context instead of letting urlparse(None)
            # raise an opaque TypeError — the page layout likely changed.
            raise Exception("未找到 dataproxy 分页链接", response.url)
        url_model = urllib.parse.urlparse(next_url)
        # Rebuild an absolute URL against the site root (self.url up to /col/).
        url = self.url.split('/col/')[0] + url_model.path
        yield from self.gen_request_by_dataproxy(
            url=url,
            data=dict(urllib.parse.parse_qsl(url_model.query)),
            callback=self.parse_content,
            auto_next=self.auto_next
        )

    def parse_response_by_dataproxy(self, response, **kwargs):
        """Parse one dataproxy list page and yield detail-page requests.

        Each ``<record>`` element's text is an HTML fragment containing an
        ``<a>`` link to the article. PDF links are skipped — only HTML detail
        pages are parsed by ``parse_content``.

        Raises:
            Exception: when the page contains no ``<record>`` elements.
        """
        callback = kwargs.get("callback")
        url_list = response.xpath("//record")
        self.logger.info(f"获取列表数量 {len(url_list)}")
        if not url_list:
            raise Exception("没有获取到url", response.text)
        for idx, i in enumerate(url_list):
            url_select = i.xpath("./text()").get()
            if not url_select:
                # Empty <record>: Selector(None) would raise TypeError — skip.
                continue
            url = Selector(url_select).xpath(".//a/@href").get()
            self.logger.info(f"获取列表序列 {idx}/{len(url_list)} {url}")
            # Case-insensitive extension check so ".PDF" links are also skipped.
            if url and not url.lower().endswith(".pdf"):
                yield self.Request(url, callback=callback)

    def parse_content(self, response, **kwargs):
        """Parse an article detail page into a NetTaxPolicyItem.

        Title and publish time come from the shared extractor, with the
        publish date preferring the page's meta tags when present.
        """
        request = response.request
        pre_data = self.parse_title_and_publish_time_by_gen(response.text)
        title = pre_data.get("title")
        # Prefer explicit meta tags; fall back to the generic extractor.
        publish_date = (
                response.xpath('//*[@name="PubDate"]/@content').get()
                or response.xpath('//*[@name="pub_date"]/@content').get()
                or pre_data.get("publish_time")
        )
        # Content is the full page HTML, cleaned by the shared processor.
        content = response.text
        content = self.process_content(content)

        item = dict(
            title=title,
            publish_date=publish_date,
            content=content,
            source_url=request.url,
            source=self.source,
            province=self.province,
            city=self.city,
            county=self.county,
            park=self.park,
        )
        yield make_item(NetTaxPolicyItem, item)


if __name__ == "__main__":
    from scrapy import cmdline

    # Launch this spider directly for local debugging.
    cmdline.execute(["scrapy", "crawl", "jiangxi_jasafxrmzf_d166_policy"])
