"""
生产者：负责爬取任务（爬取生产者爬取的网址）
"""
import requests
import redis
import json
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
import os
import pymongo
# MongoDB connection: scraped books are inserted into this collection.
# NOTE(review): host is hard-coded — consider moving it to config/env.
mongo_client = pymongo.MongoClient(host="192.168.11.155")
db = mongo_client.get_database("纵横小说")
collection = db.get_collection("女生完本")


# Redis connection: the "books" list on this server is the shared task queue.
client = redis.Redis(host="192.168.11.155")


def fetch():
    """Worker loop: drain book tasks from the Redis "books" queue.

    Each task is a JSON document carrying the book's id/name/description and
    a chapter list. Every chapter page is downloaded, its `<p>` paragraphs
    joined into plain text, and the assembled book document is inserted into
    MongoDB.

    Safe to run from several threads at once: it relies solely on the atomic
    LPOP, so two workers can never receive the same task.
    """
    while True:
        # Pop-then-test instead of the original `while llen > 0: lpop`,
        # which was a check-then-act race: two threads could both see
        # llen == 1 and the loser would crash decoding the None it popped.
        item_bytes = client.lpop("books")
        if item_bytes is None:
            break  # queue drained
        item = json.loads(item_bytes.decode())
        print(item)
        book = {
            "id": item['id'],
            "name": item['name'],
            "description": item['description'],
            "chapters": [],
        }
        for chapter in item['chapter_list']:
            print(chapter['title'], chapter['href'])
            # Timeout so one stalled server cannot hang the worker forever.
            chapter_res = requests.get(chapter['href'], timeout=30)
            chapter_soup = BeautifulSoup(chapter_res.text, features="lxml")
            chapter_container = chapter_soup.find("div", attrs={"class": "content"})
            if chapter_container is None:
                # Layout changed or the request was blocked: skip this
                # chapter rather than crashing the whole worker thread.
                print(f"content container not found, skipping: {chapter['href']}")
                continue
            chapter_ps = chapter_container.find_all("p")
            content = "\n".join(content_p.text for content_p in chapter_ps)
            print(f"章节爬取成功：{chapter['title']}   正文{content}、、、、、、、、、")
            book['chapters'].append({
                "chapter_title": chapter['title'],
                "chapter_content": content,
            })
        collection.insert_one(book)


# Fan out the scraping work across a thread pool (I/O-bound, so threads help).
def main(max_workers=12):
    """Run `fetch` worker loops concurrently until the Redis queue is empty.

    Args:
        max_workers: number of worker threads; each runs its own `fetch`
            loop, so up to this many books are processed in parallel.
    """
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit one fetch loop PER worker. The original submitted a single
        # task, so the pool size had no effect and everything ran serially.
        futures = [executor.submit(fetch) for _ in range(max_workers)]
        # Wait for every worker and surface any exception it raised.
        for future in futures:
            try:
                # result() re-raises whatever the worker thread raised.
                future.result()
            except Exception as e:
                print(f"Error during execution: {e}")

# Script entry point: run the thread-pool scraper.
if __name__ == "__main__":
    main( max_workers=os.cpu_count())  # one worker thread per CPU core
    # NOTE(review): the Redis client is never closed; client.close() here
    # would release the connection once all workers finish.

