import shutil
from typing import AsyncIterator, Any
import os
import scrapy


class ZonghengSpider(scrapy.Spider):
    """Scrape novel chapters from zongheng.com.

    Flow: POST the store-search API for one page of books -> POST the
    chapter-list API per book -> GET each chapter's read page -> write the
    chapter text to ``static/<book name>/<chapter name>.text``.
    """

    name = "zongheng"
    start_urls = ["https://www.zongheng.com/api2/catefine/storeSearch"]

    # Browser-like identity shared by all requests.
    _UA = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
    )

    # NOTE(review): these are captured session/tracking cookies (ZHID, baidu
    # referral, sensorsdata analytics). They are tied to one browser session
    # and will eventually expire -- confirm whether the APIs actually
    # require them.
    _BASE_COOKIES = {
        "ZHID": "D042420BC1064E941BDC0A2B7BB9A168",
        "zhffr": "www.baidu.com",
        "sensorsdata2015jssdkcross": "%7B%22distinct_id%22%3A%2219a3510469139a-07263436ca515e-26061851-2073600-19a351046926d4%22%2C%22%24device_id%22%3A%2219a3510469139a-07263436ca515e-26061851-2073600-19a351046926d4%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%7D",
    }

    async def start(self) -> AsyncIterator[Any]:
        """Kick off the crawl with one store-search POST (Scrapy >= 2.13 entry point).

        Requests page 2 of the listing, 20 books, ordered by weekly ranking.
        """
        yield scrapy.FormRequest(
            "https://www.zongheng.com/api2/catefine/storeSearch",
            headers={
                "referer": "https://www.zongheng.com/books?",
                "user-agent": self._UA,
                "content-type": "application/x-www-form-urlencoded",
            },
            formdata={
                "serialStatus": "1",  # presumably "completed" books -- confirm
                "vip": "9",
                "pageNum": "2",
                "pageSize": "20",
                "order": "weekOrder",
            },
            # acw_tc is an anti-crawl token only sent with the first request.
            cookies={
                **self._BASE_COOKIES,
                "acw_tc": "ac11000117624089873925981ef2d2851a45898f6b3e69c2d95d9560a9db04",
            },
        )

    def parse(self, response):
        """Parse the store-search JSON and request each book's chapter list."""
        for book in response.json()["result"]["bookList"]:
            yield scrapy.FormRequest(
                "https://bookapi.zongheng.com/api/chapter/getChapterList",
                formdata={"bookId": str(book["bookId"])},
                headers={
                    "referer": "https://www.zongheng.com/",
                    "user-agent": self._UA,
                    "content-type": "application/x-www-form-urlencoded",
                },
                cookies=self._BASE_COOKIES,
                callback=self.detail_data,
                cb_kwargs={"bookId": book["bookId"], "name": book["name"]},
            )

    def detail_data(self, response, bookId, name):
        """Walk the chapter list (grouped by tome/volume) and request each chapter page."""
        for tome in response.json()["result"]["chapterList"]:
            for chapter in tome["chapterViewList"]:
                # The value is a chapter id, not a tome id (original code
                # misnamed it ``tomeId``).
                chapter_id = chapter["chapterId"]
                yield scrapy.Request(
                    url=f"https://read.zongheng.com/chapter/{bookId}/{chapter_id}.html",
                    callback=self.content_data,
                    cb_kwargs={"chapterName": chapter["chapterName"], "name": name},
                )

    def content_data(self, response, name, chapterName):
        """Extract the chapter paragraphs and save them under static/<name>/.

        NOTE(review): ``chapterName`` is used verbatim as a file name; a
        title containing a path separator or other illegal character would
        make ``open`` fail -- confirm and sanitize if that can occur.
        """
        lines = response.xpath('//div[@class="content"]/p/text()').getall()
        directory = f"static/{name}"
        # exist_ok avoids the exists()/makedirs() race of the original code.
        os.makedirs(directory, exist_ok=True)
        # Explicit UTF-8: the platform default (e.g. GBK on Windows) may be
        # unable to encode the chapter text.
        with open(f"{directory}/{chapterName}.text", "w", encoding="utf-8") as f:
            f.write("\n".join(lines))
        self.logger.info("%s的第%s章爬取完毕", name, chapterName)
