"""
生产者：负责生产任务（一个爬虫，可以爬取批量网址）
"""
import requests
from bs4 import BeautifulSoup
import redis
import json
# Producer: scrapes zongheng.com's store-search API page by page, fetches
# each book's chapter index, and pushes one JSON record per book onto the
# Redis list "books" for downstream consumer workers.
client = redis.Redis()
client.delete("books")  # drop any stale queue left over from a previous run

# Constant endpoint -- no interpolation needed (was a needless f-string).
url = "https://www.zongheng.com/api2/catefine/storeSearch"
VIP_CLASS = ["vip", "col-4"]  # <li> class marking paywalled chapters; skipped below

for page in range(134, 501):
    print(f"正在爬取第{page}页111111111111111111111")
    # Form payload expected by the storeSearch API. Both 'totalWord' and
    # 'totalWold' are sent on purpose -- the upstream API uses the
    # misspelled key, so do not "fix" the typo.
    data = {
        'worksTypes': '0',
        'bookType': '1',
        'subWorksTypes': '0',
        'totalWord': '0',
        'serialStatus': '9',
        'vip': '9',
        'totalWold': '0',
        'pageNum': str(page),
        'pageSize': '20',
        'categoryId': '0',
        'categoryPid': '0',
        'order': 'weekOrder',
        'naodongFilter': '0',
    }
    try:
        # timeout: a stalled server must not hang the producer forever.
        res = requests.post(url, data=data, timeout=15)
    except requests.RequestException as exc:
        print(f"page {page} request failed: {exc}")
        continue  # skip this page, keep the batch going
    if res.status_code != 200:
        continue

    # Guard the nested lookup: a malformed response skips the page
    # instead of crashing the whole run.
    items = res.json().get('result', {}).get('bookList') or []
    for item in items:
        book = {
            "id": item['bookId'],
            "name": item['name'],
            "description": item['description'],
            "chapter_list": [],
        }
        try:
            chapter_res = requests.get(
                f"https://huayu.zongheng.com/showchapter/{item['bookId']}.html",
                timeout=15,
            )
        except requests.RequestException as exc:
            print(f"chapter request for book {item['bookId']} failed: {exc}")
            continue
        soup = BeautifulSoup(chapter_res.text, features='lxml')
        chapter_list = soup.find("ul", attrs={"class": "chapter-list"})
        if not chapter_list:
            # No chapter <ul> -- page layout unexpected; report and move on.
            print(f"{chapter_res.url} 没有正常内容、、、、、、、、、、、、、、、、、、、")
            continue
        for li in chapter_list.find_all("li"):
            if li.get("class") == VIP_CLASS:
                continue  # paywalled chapter -- not collected
            info = li.find("a")
            if info is None:
                continue  # defensive: an <li> without a link would crash .get()
            book['chapter_list'].append({
                "title": info.get_text(),
                "href": info.get("href"),
            })
        # Push only when the chapter list element existed (matches the
        # original behavior: books with only VIP chapters still get pushed
        # with an empty chapter_list).
        client.lpush("books", json.dumps(book, ensure_ascii=False))

client.close()