import asyncio

import requests
from fake_useragent import UserAgent
import aiohttp
import pymongo
from lxml import etree

ua = UserAgent()

# Recreate the database from scratch on every run so results from previous
# scrapes do not accumulate.
client = pymongo.MongoClient('localhost', 27017)
client.drop_database('novel_db')
db = client.get_database('novel_db')
db.drop_collection('novel')  # no-op right after drop_database; kept for safety
connection = db.get_collection('novel_data_page')

# BUG FIX: the original literal had a leading space (' https://...');
# aiohttp/yarl rejects such a URL as non-absolute, so every POST would fail.
url = 'https://www.zongheng.com/api2/catefine/storeSearch'

# Request payloads for the store-search API: one dict per page, 500 pages of
# 20 books each. Only "pageNum" varies between entries; the rest are fixed
# filter parameters expected by the endpoint.
data3 = [
    {
        "worksTypes": 0,
        "bookType": 1,
        "subWorksTypes": 0,
        "totalWord": 0,
        "serialStatus": 1,
        "vip": 9,
        "pageNum": page,
        "pageSize": 20,
        "categoryId": 0,
        "categoryPid": 0,
        "order": "weekOrder",
        "naodongFilter": 0,
    }
    for page in range(1, 501)
]




async def fetch_page(data1, session):
    """Fetch one page of the store-search book list, then scrape the first
    chapters of every book on that page and store them in MongoDB.

    Args:
        data1: one request payload dict from ``data3`` (varies by pageNum).
        session: a shared ``aiohttp.ClientSession`` used for all requests.

    Side effects: inserts one document per scraped chapter into the
    ``novel_data_page`` collection and prints progress to stdout.
    """
    async with session.post(url, data=data1, headers={'User-Agent': ua.random}) as res:
        payload = await res.json()
        book_list = payload['result']['bookList']

    for book in book_list:
        name = book['name']
        book_id = book['bookId']
        # Detail page listing the book's chapters.
        chapter_list_url = f'https://huayu.zongheng.com/showchapter/{book_id}.html'
        print('正在加载')

        # BUG FIX: the original used blocking requests.get() here. A
        # synchronous HTTP call inside a coroutine stalls the whole event
        # loop and serializes all 10 worker tasks; use the shared aiohttp
        # session instead.
        async with session.get(chapter_list_url, headers={'User-Agent': ua.random}) as chap_res:
            chapter_html = await chap_res.text()

        tree = etree.HTML(chapter_html)
        # Only the first 11 chapters of each book.
        chapters = tree.xpath('//ul[@class="chapter-list clearfix"]/li')[:11]
        docs = []
        for chapter in chapters:
            chapter_name = chapter.xpath('./a/text()')[0]  # chapter title in the list
            chapter_url = chapter.xpath('./a/@href')[0]    # absolute chapter URL

            async with session.get(chapter_url, headers={'User-Agent': ua.random}) as body_res:
                body_html = await body_res.text()

            body_tree = etree.HTML(body_html)
            title = body_tree.xpath('//div[@class="title_txtbox"]/text()')[0]
            print(title)
            # First 30 characters of the first paragraph as a preview.
            excerpt = body_tree.xpath('//div[@class="content"]/p/text()')[0][:30]
            print(excerpt)
            docs.append({
                'name': chapter_name,
                'title': title,
                'items': excerpt
            })

        # BUG FIX: insert_many() raises InvalidOperation on an empty list
        # (e.g. a book page with no parseable chapter list).
        if docs:
            connection.insert_many(docs)


async def task():
    """Worker coroutine: opens its own HTTP session, then keeps pulling
    page payloads off the shared ``data3`` list until it is drained,
    fetching each one in turn."""
    async with aiohttp.ClientSession() as session:
        while True:
            if not data3:
                break
            payload = data3.pop()
            await fetch_page(payload, session)


async def main():
    """Spawn 10 concurrent worker tasks that cooperatively drain the shared
    ``data3`` payload list (each worker owns one HTTP session)."""
    await asyncio.gather(*(asyncio.create_task(task()) for _ in range(10)))


# Guard the entry point so importing this module does not start a crawl.
if __name__ == '__main__':
    asyncio.run(main())