import requests
from bs4 import BeautifulSoup
import re
import time
from queue import Queue
import multiprocessing as mp

# Browser-like User-Agent header sent with every request so the site does
# not reject us as an obvious bot. Shared by all worker processes.
h = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'}

def parse_page(url):
    """Fetch one catalogue page and extract its book entries.

    Args:
        url: Full URL of a zongheng.com store listing page.

    Returns:
        A list of dicts, one per book, each with keys ``'img'`` (cover
        image src), ``'name'`` (title text) and ``'intro'`` (blurb text).
        Returns an empty list if the expected listing container is absent.
    """
    # timeout prevents a worker process from hanging forever on a
    # stalled connection (the original call had no timeout at all).
    content = requests.get(url, headers=h, timeout=10).content

    # Parse the page markup.
    soup = BeautifulSoup(content, 'html5lib')
    store_collist = soup.find('div', class_='store_collist')
    if store_collist is None:
        # Layout changed or the page carried no listing — return no
        # books instead of crashing with AttributeError on find_all.
        return []

    # Book boxes carry classes like "bookbox fl" / "bookbox fr".
    pattern = re.compile(r'bookbox f(.)')
    page_books = []
    for item in store_collist.find_all('div', class_=pattern):
        book = {
            'img': item.find('div', class_='bookimg').find('img')['src'],
            'name': item.find('div', class_='bookname').find('a').text,
            'intro': item.find('div', class_='bookintro').text,
        }
        page_books.append(book)

    return page_books

def main(pages):
    """Scrape *pages* catalogue pages in parallel and print a summary.

    Args:
        pages: Number of listing pages (1..pages) to fetch.

    Side effects:
        Prints the total book count and the last parsed book, or just
        ``0`` when nothing was collected.
    """
    base_url = 'http://book.zongheng.com/store/c0/c0/b0/u0/p{}/v9/s9/t0/u0/i1/ALL.html'
    url_list = [base_url.format(i) for i in range(1, pages + 1)]

    # Context manager guarantees the worker processes are terminated and
    # joined even if a task raises — the original never closed the pool.
    with mp.Pool() as pool:
        multi_res = [pool.apply_async(parse_page, (url,)) for url in url_list]
        pages_books = [res.get() for res in multi_res]

    # Flatten the per-page lists into one list of books.
    books = [book for page_books in pages_books for book in page_books]

    if books:
        print(len(books), books[-1])
    else:
        # Guard against IndexError on books[-1] when pages == 0 or no
        # page yielded any entries.
        print(0)

if __name__ == "__main__":
    pages = int(input('输入一个页数：'))
    t1 = time.time()
    main(pages)
    t2 = time.time()
    print(t2-t1)