import json
from typing import Iterable

import scrapy
from scrapy import Request

from apps.tax_house.tax_house.items import TaxHouseDataItem


class TaxHouseSpider(scrapy.Spider):
    """Spider for shui5.cn (税屋).

    Starting from the home page, it follows navigation-menu categories whose
    link text matches one of ``tags``, paginates through each category's list
    pages, and yields one ``TaxHouseDataItem`` per article.
    """

    name = "tax_house_spider"
    # Navigation-menu categories to crawl; compared against each menu link's text.
    tags = [
        "法规解读",
        "税收优惠",
        "税务合规",
        "税务问答",
        "会计问答",
        "纳税调整",
        "纳税评估",
        "财税表单",
        "财商资讯",
    ]

    def start_requests(self) -> Iterable[Request]:
        """Entry point: request the site's home page."""
        yield Request(url="https://www.shui5.cn/", callback=self.parse)

    def parse(self, response, **kwargs):
        """Parse the home-page menu and follow links whose text is in ``tags``.

        Skips anchors without an ``href`` (the original could yield
        ``Request(url=None)``) and resolves relative hrefs against the page URL.
        """
        for menu_item in response.xpath('//*[@class="menu"]//li'):
            anchor = menu_item.xpath(".//a")
            text = anchor.xpath("./text()").get()  # hoisted: was queried twice
            href = anchor.xpath("./@href").get()
            if text in self.tags and href:
                yield Request(
                    url=response.urljoin(href),
                    callback=self.parse_list,
                    cb_kwargs={"tag": text},
                )

    def parse_list(self, response, **kwargs):
        """Parse a category list page: schedule the next page, then each article.

        Pagination works by taking the "末页" (last page) link, whose href ends
        in ``_<total>.<ext>``, and substituting the next page number for the
        total. ``page`` in ``cb_kwargs`` tracks how far we have paginated.
        """
        tag = kwargs.get("tag")
        last_page = response.xpath('//*[@class="pageWrap"]//a[contains(text(), "末页")]')
        last_page_href = last_page.xpath("./@href").get()
        # Escaped the trailing dot: the original r".*_(\d+)." matched any
        # character after the digits, not only a literal ".".
        total_page = last_page.xpath("./@href").re_first(r".*_(\d+)\.")
        page = kwargs.get("page", 0)
        if last_page_href and total_page and page < int(total_page):
            # dont_filter: the synthesized page URLs can collide with URLs the
            # dedup filter has already seen.
            yield Request(
                url=response.urljoin(last_page_href.replace(f"_{total_page}.", f"_{page + 1}.")),
                callback=self.parse_list,
                cb_kwargs={"page": page + 1, "tag": tag},
                dont_filter=True,
            )

        for entry in response.xpath('//*[@class="xwt2"]'):
            href = entry.xpath(".//a/@href").get()
            if href:  # skip entries without a link instead of yielding url=None
                yield Request(url=response.urljoin(href), callback=self.parse_detail, cb_kwargs=kwargs)

    def parse_detail(self, response, **kwargs):
        """Parse an article page into a ``TaxHouseDataItem``.

        Fields left as ``None`` when the corresponding node is absent.
        """
        abstract = response.xpath('//meta[@name="description"]/@content').get()

        # "时间" line looks like "时间:2023-01-01" (ASCII or full-width colon).
        release_time = response.xpath('//*[@class="articleResource"]//span[contains(text(), "时间")]/text()').re_first(
            r".*[:：](.*)"
        )
        # Trim surrounding whitespace, consistent with how `title` is handled.
        release_time = release_time.strip() if release_time else release_time
        link = response.url
        content_type = kwargs.get("tag")
        title = (response.xpath('string(//*[@class="articleTitle"])').get() or "").strip()
        text = response.xpath('//*[@class="arcContent"]').get()

        item = {
            "title": title,
            "text": text,
            "link": link,
            "release_time": release_time,
            "content_type": content_type,
            "abstract": abstract,
        }
        yield TaxHouseDataItem(**item)


if __name__ == "__main__":
    # Convenience entry point: running this module directly is equivalent to
    # invoking `scrapy crawl tax_house_spider` from the command line.
    from scrapy import cmdline

    cmdline.execute("scrapy crawl tax_house_spider".split())
