# gevent must patch the socket layer BEFORE requests is imported, otherwise
# the spawned greenlets perform their HTTP requests serially.
from gevent import monkey
monkey.patch_all()

import re
import time
from queue import Queue

import gevent
import requests
from bs4 import BeautifulSoup

class Spider:
    """Concurrently scrapes book listings from zongheng.com using gevent greenlets."""

    def __init__(self):
        self.headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'}
        # Base listing URL; {} is filled with the 1-based page number.
        self.base_url = 'http://book.zongheng.com/store/c0/c0/b0/u0/p{}/v9/s9/t0/u0/i1/ALL.html'
        # Holding area for books produced by the greenlets.
        self.dataQueue = Queue()
        # Final flat list of scraped books, populated by start_work().
        self.books = []

    # Fetch and parse a single listing page.
    def get_page_books(self, url):
        """Fetch one listing page and enqueue one dict per book found.

        Each dict has the keys 'img' (cover URL), 'name' (title) and
        'intro' (blurb text).
        """
        # timeout= prevents a stalled server from hanging this greenlet forever.
        response = requests.get(url, headers=self.headers, timeout=10)
        soup = BeautifulSoup(response.content, 'html5lib')

        store_collist = soup.find('div', class_='store_collist')
        if store_collist is None:
            # Error page or changed layout — nothing to parse on this page.
            return

        # Book containers are <div class="bookbox fl">, <div class="bookbox fr">, etc.
        pattern = re.compile(r'bookbox f(.)')
        for item in store_collist.find_all('div', class_=pattern):
            book = {
                'img': item.find('div', class_='bookimg').find('img')['src'],
                'name': item.find('div', class_='bookname').find('a').text,
                'intro': item.find('div', class_='bookintro').text,
            }
            self.dataQueue.put(book)

    def start_work(self, pageNum):
        """Scrape pages 1..pageNum concurrently and collect results into self.books."""
        # NOTE(review): for the greenlets to fetch concurrently, requests'
        # sockets must be gevent-patched (gevent.monkey.patch_all() before
        # importing requests); without it the HTTP calls run serially.
        job_list = [
            gevent.spawn(self.get_page_books, self.base_url.format(page))
            for page in range(1, pageNum + 1)
        ]

        # Block until every page has been fetched and parsed.
        gevent.joinall(job_list)

        # Drain the queue into the ordered result list.
        while not self.dataQueue.empty():
            self.books.append(self.dataQueue.get())

if __name__ == "__main__":
    # Number of listing pages to scrape (prompt is user-facing, kept as-is).
    pages = int(input('请输入页码：'))
    t1 = time.time()
    spider = Spider()
    spider.start_work(pages)
    # Guard: books can be empty if every page failed to fetch or parse;
    # indexing [-1] unconditionally would raise IndexError.
    if spider.books:
        print(len(spider.books), spider.books[-1])
    else:
        print('no books scraped')
    t2 = time.time()
    # Elapsed wall-clock time for the whole scrape.
    print(t2 - t1)