import os

import scrapy

from ..items import NovelItem


class RequestZonghengSpider(scrapy.Spider):
    """Crawl zongheng.com's store-search API, walk each book's chapter
    list, and yield one ``NovelItem`` per chapter with its full text.

    Flow: ``start`` -> ``parse`` (book list) -> ``parse_book``
    (chapter list) -> ``parse_chapter`` (chapter body -> item).
    """

    name = "request_zongheng"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Directory where downstream pipelines are expected to write
        # novel output; created eagerly so pipelines can rely on it.
        self.base_dir = "novels"
        # exist_ok=True avoids the check-then-create race of the old
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(self.base_dir, exist_ok=True)

    async def start(self):
        """Seed the crawl: POST one store-search request per result page.

        Pages 1..260 are fetched, 20 books per page, restricted to
        serialStatus=1 and ordered by weekly ranking ("weekOrder").
        """
        for page in range(1, 261):
            yield scrapy.FormRequest(
                "https://www.zongheng.com/api2/catefine/storeSearch",
                formdata={
                    "worksTypes": "0",
                    "bookType": "0",
                    "subWorksTypes": "0",
                    "totalWord": "0",
                    "serialStatus": "1",
                    "vip": "0",
                    "pageNum": str(page),
                    "pageSize": "20",
                    "naodongFilter": "0",
                    "order": "weekOrder",
                    "categoryId": "0",
                    "categoryPid": "0",
                },
            )

    async def parse(self, response):
        """Handle a store-search page: request each book's chapter list.

        Book metadata is forwarded to ``parse_book`` via ``cb_kwargs``
        so it can be copied onto every chapter item.
        """
        for book in response.json()["result"]["bookList"]:
            # Log through the spider's logger (not print) so output
            # honors Scrapy's LOG_LEVEL / log handlers.
            self.logger.info("book %s (id=%s)", book["name"], book["bookId"])
            yield scrapy.FormRequest(
                "https://bookapi.zongheng.com/api/chapter/getChapterList",
                formdata={
                    "bookId": str(book["bookId"]),
                },
                cb_kwargs={
                    "book_id": book["bookId"],
                    "book_name": book["name"],
                    "author": book.get("authorName", ""),
                    "category": book.get("categoryName", ""),
                },
                callback=self.parse_book,
            )

    async def parse_book(self, response, book_id, book_name, author, category):
        """Handle a chapter-list response: request every chapter page.

        The API nests chapters as result.chapterList[].chapterViewList[]
        (presumably one chapterList entry per volume — TODO confirm).
        """
        for volume in response.json()["result"]["chapterList"]:
            for chapter in volume["chapterViewList"]:
                item = NovelItem()
                item["book_id"] = book_id
                item["book_name"] = book_name
                item["author"] = author
                item["category"] = category
                item["chapter_id"] = chapter["chapterId"]
                item["chapter_name"] = chapter["chapterName"]
                item["chapter_index"] = chapter.get("chapterIndex", 0)
                item["chapter_url"] = (
                    f"https://read.zongheng.com/chapter/"
                    f"{chapter['bookId']}/{chapter['chapterId']}.html"
                )

                yield scrapy.Request(
                    item["chapter_url"],
                    cb_kwargs={"item": item},
                    callback=self.parse_chapter,
                )

    async def parse_chapter(self, response, item):
        """Extract the chapter's paragraph text and emit the final item."""
        paragraphs = response.xpath("//div[@class='content']/p/text()").getall()
        item["content"] = "\n".join(paragraphs)
        yield item


