import queue
import random
import threading
import time

import requests
from bs4 import BeautifulSoup

def craw(url):
    """Download *url* and return the response body as text.

    Args:
        url: Page URL to fetch.

    Returns:
        The decoded HTML body of the response.
    """
    # Without a timeout, requests.get() can block forever on a stalled
    # connection, permanently wedging the worker thread that called us.
    r = requests.get(url, timeout=10)
    return r.text

def do_craw(url_queue: queue.Queue, html_queue: queue.Queue):
    """Downloader worker: pull URLs off *url_queue*, fetch each page,
    and push the raw HTML onto *html_queue*. Runs forever."""
    while True:
        next_url = url_queue.get()
        page_html = craw(next_url)
        html_queue.put(page_html)
        worker = threading.current_thread().name
        print(worker, f"do_craw {next_url},size {url_queue.qsize()}")
        # Short randomized pause staggers the workers and keeps the
        # crawl rate polite.
        time.sleep(random.randint(1, 2))

def parser(html):
    """Parse *html* and return a list of (href, title) tuples for every
    anchor carrying the 'post-item-title' CSS class."""
    soup = BeautifulSoup(html, 'html.parser')
    # find_all(tag, class) — matches <a> elements with that class.
    anchors = soup.find_all('a', 'post-item-title')
    results = []
    for anchor in anchors:
        results.append((anchor['href'], anchor.get_text()))
    return results

def do_parser(html_queue: queue.Queue):
    """Parser worker: pull HTML pages off *html_queue* and print each
    extracted (href, title) result. Runs forever."""
    while True:
        page = html_queue.get()
        me = threading.current_thread().name
        for item in parser(page):
            print(me, item, f"html_queue size:{html_queue.qsize()}")
        # Pause between pages so output from the two parser threads
        # interleaves visibly.
        time.sleep(random.randint(1, 2))


if __name__ == '__main__':
    url_queue = queue.Queue()
    html_queue = queue.Queue()

    # Seed the work queue with the first 10 listing pages.
    for page in range(1, 11):
        url_queue.put(f"https://www.cnblogs.com/#p{page}")

    # Three downloader threads feed two parser threads through
    # html_queue (producer/consumer pipeline).
    for idx in range(3):
        threading.Thread(
            target=do_craw,
            args=(url_queue, html_queue),
            name=f'do_craw{idx}',
        ).start()

    for idx in range(2):
        threading.Thread(
            target=do_parser,
            args=(html_queue,),
            name=f'do_parser{idx}',
        ).start()
