import datetime
import json
from typing import Iterable

import scrapy
from gne import GeneralNewsExtractor
from scrapy import Request

from apps.tax_news.tax_news.items import NetIndustryNewsItem


class SHZQBIndustryNew(scrapy.Spider):
    """Spider for industry news from Shanghai Securities News (上海证券报).

    POSTs to the cnstock channel-news list API for several channel nodes,
    pages backwards while the oldest article on a page is still newer than
    yesterday's midnight, then fetches each article's detail page and
    extracts the cleaned body HTML with GeneralNewsExtractor.
    """

    source = "上海证券报"
    name = "shzqb_industry_new"

    # Endpoint serving paginated channel news lists (POST, JSON body).
    LIST_API = "https://api.cnstock.com/www/newsList/channelNewsList"

    def _list_request(self, node_id, page_num=1, start_time=0):
        """Build one POST request for a page of a channel's news list.

        Shared by ``start_requests`` (first page) and ``parse`` (pagination)
        so the URL, headers, and payload shape live in exactly one place.
        """
        payload = {
            "nodeId": node_id,
            "filterIdArray": [],
            "isChannel": True,
            "pageNum": page_num,
            "pageSize": 32,
            "startTime": start_time,
        }
        return scrapy.Request(
            url=self.LIST_API,
            callback=self.parse,
            method="POST",
            body=json.dumps(payload),
            headers={
                "cnstock-client-type": "01",
                "content-type": "application/json",
            },
            # nodeId is threaded through so parse can request the next page.
            cb_kwargs={"nodeId": node_id},
        )

    def start_requests(self) -> Iterable[Request]:
        """Kick off the crawl with page 1 of each channel node."""
        # Channel node ids — presumably the industry sub-channels of the
        # site; TODO confirm against the cnstock channel list.
        for node_id in ("10005", "10232", "10011", "10006", "10007"):
            yield self._list_request(node_id)

    def parse(self, response, **kwargs):
        """Parse one list page: yield detail requests, paginate if recent.

        Expects the API response shape ``data.pageInfo.{list,nextPageNum,
        startTime}``; ``kwargs`` carries ``nodeId`` from ``_list_request``.
        """
        page_info = response.json()["data"]["pageInfo"]
        entries = page_info["list"]

        # Flatten grouped rows: a row may wrap several articles under
        # "childList", or be a single article itself.
        results = []
        for info in entries:
            results.extend(info.get("childList", [info]))

        # Guard against an empty page before peeking at the last entry
        # (the original unconditionally indexed [-1] → IndexError).
        if entries:
            date_info = entries[-1]["shareInfo"]["dateInfo"]
            oldest_pub_time = datetime.datetime(
                year=int(date_info["year"]),
                month=int(date_info["month"]),
                day=int(date_info["day"]),
                hour=int(date_info["hour"]),
                minute=int(date_info["minute"]),
            )
            # Cutoff is yesterday's midnight; microsecond is zeroed too so
            # the boundary comparison is exact.
            cutoff = datetime.datetime.now().replace(
                hour=0, minute=0, second=0, microsecond=0
            ) - datetime.timedelta(days=1)
            # Keep paginating while the oldest article is still recent.
            if oldest_pub_time > cutoff:
                yield self._list_request(
                    kwargs["nodeId"],
                    page_num=page_info["nextPageNum"],
                    start_time=page_info["startTime"],
                )

        for result in results:
            yield scrapy.Request(
                f"https://www.cnstock.com/commonDetail/{result['contId']}",
                callback=self.parse_detail,
            )

    def parse_detail(self, response, **kwargs):
        """Parse a Next.js-rendered article page into a news item.

        The article payload lives in the ``__NEXT_DATA__`` JSON blob
        embedded in the page; GNE extracts cleaned body HTML from the raw
        content.
        """
        raw = response.xpath('//script[@id="__NEXT_DATA__"]/text()').get()
        data = json.loads(raw)["props"]["pageProps"]["data"]

        extractor = GeneralNewsExtractor()
        info = extractor.extract(
            data["textInfo"]["content"],
            body_xpath='.',
            with_body_html=True,
        )

        cover = data["shareInfo"]["coverPic"]
        yield NetIndustryNewsItem(
            title=data["title"],
            publish_time=data["pubTime"],
            url=response.url,
            source=self.source,
            body_html=info["body_html"],
            tags="|".join(tag["name"].strip("#") for tag in data["tags"]),
            # Drop the cover image when it is the site's default placeholder
            # (coverPic may be None, hence the `or ""`).
            image_list=[cover] if "default_cover.png" not in (cover or "") else [],
        )


def run():
    """Launch this spider through Scrapy's command-line runner."""
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "shzqb_industry_new"])


if __name__ == "__main__":
    run()
