import datetime

import parsel
import scrapy
from gne import GeneralNewsExtractor

from apps.tax_news.tax_news.items import NetTaxNewsItem
from utils.tools import format_date, to_date, urlencode, parse_url_params


class NationTaxNewsSpider(scrapy.Spider):
    """Spider for "媒体视点" (media viewpoint) news on chinatax.gov.cn
    (State Taxation Administration).

    Walks the paginated manuscript-list endpoint, follows each article
    link, and extracts title / body HTML / publish time with
    GeneralNewsExtractor. Only articles published within the last
    ``MAX_AGE_DAYS`` days are collected; pagination stops at the first
    older entry.
    """

    url = "https://www.chinatax.gov.cn/chinatax/n810219/n810780/common_mtsd_list.html"
    source = "国家税务总局"  # fallback source label when the list entry has none
    name = "nation_tax_news"

    # Stop paginating once an article older than this many days is seen.
    MAX_AGE_DAYS = 3

    def start_requests(self):
        """Kick off crawling at page 1 of the manuscript-list endpoint."""
        url = "https://www.chinatax.gov.cn/chinatax/manuscriptList/n810780"
        params = {
            "_isAgg": "0",
            "_pageSize": "20",
            "_template": "index",
            "_channelName": "媒体视点",
            "_keyWH": "wenhao",
            "page": "1",
        }
        yield scrapy.Request(url + "?" + urlencode(params), callback=self.parse)

    def parse(self, response, **kwargs):
        """Parse one list page.

        Yields a detail request per recent article; when every entry on
        the page is within the recency window, also requests the next
        page.
        """
        root_url, params = parse_url_params(response.request.url)
        item_list = response.xpath('//ul[@class="list"]//li')
        # Keep paginating only while the page is non-empty and no entry
        # has fallen outside the recency window.
        has_next = bool(item_list)
        for item in item_list:
            source = item.xpath("string(./a)").re_first(r"\[(.*?)\]")
            pub_date = format_date(item.xpath("string(./span)").re_first(r"\[(.*?)\]"))
            industry_url = item.xpath(".//a/@href").get()
            if not pub_date or not industry_url:
                # Malformed list entry (date regex missed or no link):
                # skip it instead of crashing the whole page.
                continue
            diff = (datetime.datetime.today().date() - to_date(pub_date).date()).days
            if diff >= self.MAX_AGE_DAYS:
                has_next = False  # only collect the last few days of data
                break
            yield scrapy.Request(
                # urljoin copes with relative hrefs; absolute URLs pass
                # through unchanged.
                response.urljoin(industry_url),
                method="GET",
                callback=self.parse_detail,
                cb_kwargs=dict(
                    source=source,
                    pub_date=pub_date,
                ),
            )

        if has_next:
            # Default to page 1 defensively in case the query string lost
            # its "page" parameter.
            next_page = str(int(params.get("page", "1")) + 1)
            yield scrapy.Request(
                root_url + "?" + urlencode({**params, "page": next_page}),
                method="GET",
                callback=self.parse,
            )

    def parse_detail(self, response, **kwargs):
        """Extract article fields from a detail page and yield an item.

        Items whose extracted body text is empty or whose publish time is
        missing are dropped.
        """
        extractor = GeneralNewsExtractor()
        info = extractor.extract(
            response.text,
            with_body_html=True,
            body_xpath='//div[@id="fontzoom"]',
            title_xpath='//meta[@name="ArticleTitle"]/@content',
        )
        item = NetTaxNewsItem()
        item.title = info["title"]
        item.publish_time = info["publish_time"]
        item.body_html = info["body_html"]
        # Prefer the source parsed from the list page; fall back to the
        # spider-level default.
        item.source = kwargs.get("source") or self.source
        item.url = response.request.url
        body_text = (parsel.Selector(item.body_html).xpath("string(.)").get() or "").strip()
        if body_text and item.publish_time:
            yield item


def run():
    """Launch this spider through Scrapy's command-line entry point."""
    from scrapy import cmdline

    # Equivalent to running `scrapy crawl nation_tax_news` in a shell.
    cmdline.execute(["scrapy", "crawl", "nation_tax_news"])


# Allow running this module directly as a script to start the crawl.
if __name__ == "__main__":
    run()
