import json

import aiohttp
import scrapy


class A纵横中文网Spider(scrapy.Spider):
    """Spider for zongheng.com ("Zongheng Chinese Net").

    Crawl pipeline:
        start  -> POST the store-search API (first page of finished books,
                  weekly ranking) -> parse
        parse  -> one chapter-list API request per book       -> parse2
        parse2 -> one page request per chapter                -> parse3
        parse3 -> extract and print the chapter body text
    """

    name = "纵横中文网"

    # Single browser-like identity shared by every request this spider makes.
    USER_AGENT = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36"
    )

    async def start(self):
        """Entry point (Scrapy >= 2.13 async ``start()`` hook).

        POSTs the catefine store-search API for the first page (20 items)
        of completed works ordered by weekly popularity.
        """
        form = {
            'serialStatus': '1',  # presumably 1 = completed/finished — TODO confirm against the API
            'vip': '9',
            'pageNum': '1',
            'pageSize': '20',
            'order': 'weekOrder',  # weekly ranking order
        }
        yield scrapy.FormRequest(
            'https://www.zongheng.com/api2/catefine/storeSearch',
            headers={'user-agent': self.USER_AGENT},
            formdata=form,
        )

    def parse(self, response):
        """Parse the store-search JSON and request each book's chapter list."""
        books = response.json()['result']['bookList']

        headers = {
            'referer': 'https://www.zongheng.com/',
            'user-agent': self.USER_AGENT,
        }

        for book in books:
            yield scrapy.FormRequest(
                'https://bookapi.zongheng.com/api/chapter/getChapterList',
                formdata={'bookId': str(book['bookId'])},
                headers=headers,
                callback=self.parse2,
            )
            # NOTE(review): debug limiter — only the FIRST book is crawled.
            # Remove this `break` to process the whole result page.
            break

    async def parse2(self, response):
        """Parse the chapter-list JSON and request every chapter page."""
        volumes = response.json()['result']['chapterList']
        for volume in volumes:
            for chapter in volume['chapterViewList']:
                book_id = chapter['bookId']
                chapter_id = chapter['chapterId']
                yield scrapy.Request(
                    f'https://read.zongheng.com/chapter/{book_id}/{chapter_id}.html',
                    callback=self.parse3,
                )

    async def parse3(self, response):
        """Extract the chapter body paragraphs and print the joined text.

        NOTE(review): output only goes to stdout; yield an item/dict here
        if chapter text should flow through Scrapy's item pipelines.
        """
        paragraphs = response.xpath('//div[@class="content"]/p/text()').getall()
        print('\n'.join(paragraphs))



