import datetime
import re
from typing import Iterable

import scrapy
from gne import GeneralNewsExtractor
from scrapy import Request

from apps.tax_news.tax_news.items import NetIndustryNewsItem
from utils.tools import format_date, to_date


class CyfzyjIndustryNewsSpider(scrapy.Spider):
    """Crawl industry news from chinaidr.com (产业发展研究网).

    Flow: trade-news index page -> first list page of every industry ->
    paginated list pages -> article detail pages.  Details are extracted
    with GNE and yielded as ``NetIndustryNewsItem``.
    """

    name = "cyfzyj_industry_news"
    url = "http://www.chinaidr.com/tradenews/"
    source = "产业发展研究网"

    def start_requests(self) -> Iterable[Request]:
        """Kick off the crawl from the trade-news index page."""
        yield Request(url=self.url, callback=self.parse)

    def parse(self, response, **kwargs):
        """Parse the index page and follow page 1 of each industry list."""
        for industry in response.xpath("//ul[@class='bigtradelist']//a"):
            href = industry.xpath("./@href").get()
            if not href:
                # Anchor without a target — nothing to follow.
                continue
            # "/trade/<slug>" -> "/tradenews/<slug>_1" (first list page).
            href = href.replace("trade", "tradenews") + "_1"
            yield response.follow(href, callback=self.parse_list)

    def parse_list(self, response, **kwargs):
        """Parse one list page: follow fresh articles, then the next page."""
        item_list = response.xpath('//*[@class="news_left"]//li')
        # Breadcrumb words become the tags of every item from this list.
        industry_key_list = ''.join(
            response.xpath('//*[@class="curpos"]/text()').getall()
        ).split()

        # Only articles published since yesterday 00:00 are followed.
        # Hoisted out of the loop; microsecond=0 makes the cutoff exact.
        cutoff = datetime.datetime.now().replace(
            hour=0, minute=0, second=0, microsecond=0
        ) - datetime.timedelta(days=1)

        for item in item_list:
            industry_url = item.xpath('.//a/@href').get()
            pub_date = item.xpath('.//span/text()').get()
            if not industry_url or not pub_date:
                # Malformed row — skip instead of crashing on None.
                continue
            # List rows show only "MM-DD"; recover the year from the
            # article URL when present, otherwise assume the current year.
            year = re.findall(r"(\d{4})-", industry_url)
            if year:
                pub_date = format_date(f"{year[0]}-" + pub_date)
            else:
                pub_date = format_date(f"{datetime.datetime.now().year}-" + pub_date)

            if to_date(pub_date) > cutoff:
                yield response.follow(
                    industry_url, method="GET", callback=self.parse_detail,
                    cb_kwargs=dict(industry_key_list=industry_key_list)
                )

        if item_list:
            # List-page URLs end in "_<page>"; rsplit guards against any
            # other underscore earlier in the URL.
            base_url, page = response.request.url.rsplit("_", 1)
            yield response.follow(
                f"{base_url}_{int(page) + 1}",
                method="GET",
                # BUG FIX: subsequent pages are list pages, so they must be
                # handled by parse_list (the original sent them to parse,
                # which expects the index page and would yield nothing).
                callback=self.parse_list,
            )

    def parse_detail(self, response, **kwargs):
        """Extract one article with GNE and yield a NetIndustryNewsItem.

        Items without an extracted publish time are dropped.
        """
        industry_key_list = kwargs.get("industry_key_list") or []
        content = response.xpath('//*[@class="content"]').get()
        if not content:
            # No article container on this page — nothing to extract
            # (the original would crash calling .replace on None).
            return
        # Strip sidebar / footer / navigation markup so GNE only sees
        # the article body.
        for noise_xpath in (
            '//*[@class="news_right"]',
            '//*[@class="footer_links"]',
            '//*[@class="backhome"]',
        ):
            noise = response.xpath(noise_xpath).get()
            if noise:
                content = content.replace(noise, '')

        extractor = GeneralNewsExtractor()
        info = extractor.extract(content, with_body_html=True)

        # NOTE(review): attribute-style assignment assumes NetIndustryNewsItem
        # is a plain class, not a scrapy.Item (which requires item["key"]) —
        # kept as-is; confirm against the item definition.
        item = NetIndustryNewsItem()
        item.title = info["title"]
        item.publish_time = info["publish_time"]
        item.body_html = info["body_html"]
        item.tags = '|'.join(industry_key_list)
        item.source = self.source
        item.url = response.request.url
        if item.publish_time:
            yield item


def run():
    """Launch this spider through Scrapy's command-line entry point."""
    from scrapy import cmdline

    command = "scrapy crawl cyfzyj_industry_news"
    cmdline.execute(command.split())


# Allow running this module directly to start the crawl.
if __name__ == "__main__":
    run()
