import os

import scrapy
from scrapy.http.request.json_request import JsonRequest

# Default HTTP headers shared by every request this spider sends.
# NOTE(review): this module-level dict is mutated in parse_detail via
# headers.update(...), so the referer set for one request bleeds into all
# later requests — consider per-request copies instead.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36'
}


class ZonghengxiaoshuoSpider(scrapy.Spider):
    """Crawl completed free books from zongheng.com and save each chapter
    as ``./static/<book name>/<chapter name>.txt``.

    Pipeline: paginated store search -> per-book chapter list (JSON API)
    -> per-chapter reader page -> text file on disk.
    """

    name = "zonghengxiaoshuo"

    async def start(self):
        """Yield POST requests for pages 1..500 of the store search API."""
        for i in range(1, 501):
            yield scrapy.FormRequest('https://www.zongheng.com/api2/catefine/storeSearch', formdata={
                'serialStatus': '1',
                'vip': '9',
                'pageNum': f'{i}',
                'pageSize': '20'
            }, headers=headers, callback=self.parse)

    async def parse(self, response):
        """For each book on a search-result page, create its output folder
        and request the chapter list from the JSON API.
        """
        book_list = response.json()['result']['bookList']
        for book in book_list:
            bookid = book['bookId']
            book_name = book['name']
            # exist_ok avoids a race/crash when the same book appears twice.
            os.makedirs(f'./static/{book_name}/', exist_ok=True)
            yield JsonRequest('https://bookapi.zongheng.com/api/chapter/getChapterList', data={
                'bookid': bookid,
            }, callback=self.parse_detail, headers=headers, cb_kwargs={'name': book_name})

    async def parse_detail(self, response, name):
        """Request every chapter reader page of one book.

        ``name`` is the book name, used later as the output directory.
        """
        chapterlist = response.json()['result']['chapterList']
        # NOTE(review): index 0 is skipped — presumably the first volume is
        # front matter; confirm against the API payload.
        for i in range(1, len(chapterlist)):
            for tome in chapterlist[i]['chapterViewList']:
                read_url = f'https://read.zongheng.com/chapter/{tome["bookId"]}/{tome["chapterId"]}.html'
                # Build a per-request header dict instead of mutating the
                # shared module-level `headers`: requests run concurrently,
                # and a shared mutation would leak this referer into
                # unrelated requests.
                request_headers = {**headers, 'referer': response.url}
                yield scrapy.Request(read_url, headers=request_headers, callback=self.read_parse, cb_kwargs={
                    'tome_name': tome['chapterName'],
                    'name': name
                })

    async def read_parse(self, response, tome_name, name):
        """Extract chapter paragraphs and write them to one text file.

        The file is opened once in 'w' mode (truncating any previous run)
        instead of removing it and re-opening in append mode per paragraph.
        """
        reads = response.xpath("//div[@class='content']/p")
        with open(f'./static/{name}/{tome_name}.txt', 'w', encoding='utf-8') as f:
            for read in reads:
                # Keep the original per-<p> first-text-node extraction.
                content = read.xpath("./text()").get()
                if content:
                    f.write(f'{content}\n')
