from queue import Queue
from pratice_threading import blog_spider
import time
from random import randint
import threading


def blog_producer(url_queue: Queue, html_queue: Queue):
    """Producer worker: repeatedly take a URL from *url_queue*, fetch its
    HTML via blog_spider, and push the response onto *html_queue*.

    Runs forever (no termination condition); intended to be driven as a
    daemon-style worker thread.
    """
    while True:
        # Blocks until a URL is available; popping shrinks url_queue by one.
        url = url_queue.get()
        # Fetch the HTML response for this URL.
        response = blog_spider.get_blog_response(url)
        worker = threading.current_thread().name
        print(f'线程名：{worker}, url队列长度：{url_queue.qsize()}, html队列长度：{html_queue.qsize()}')
        # Hand the fetched response to the consumer side; grows html_queue by one.
        html_queue.put(response)
        # Throttle: pause 1-2 seconds between fetches.
        time.sleep(randint(1, 2))


def blog_consumer(html_queue: Queue, output_file):
    """Consumer worker: repeatedly take an HTML response from *html_queue*,
    extract its blog links (url + title pairs) via blog_spider, and write
    one entry per line to *output_file*.

    Runs forever (no termination condition); intended as a worker thread.
    """
    while True:
        # Blocks until the producer side has put a response on the queue.
        response = html_queue.get()
        # Parse the page into a list of (link url, title) results.
        links = blog_spider.get_blog_links(response)
        worker = threading.current_thread().name
        print(f'线程名：{worker}, html队列长度：{html_queue.qsize()}, 超链接个数：{len(links)}')
        # Persist each extracted link as its own line.
        output_file.writelines(str(link) + '\n' for link in links)
        # Throttle: pause 1-2 seconds between pages.
        time.sleep(randint(1, 2))


if __name__ == '__main__':
    # Wire up the producer/consumer pipeline: urls -> html -> parsed links.
    temp_url_queue = Queue()
    temp_html_queue = Queue()
    # Seed the URL queue with every target URL from the spider module.
    for url in blog_spider.urls:
        temp_url_queue.put(url)

    # Two producer threads fetch pages concurrently.
    for idx in range(2):
        t = threading.Thread(target=blog_producer, args=(temp_url_queue, temp_html_queue), name=f'producer{idx}')
        t.start()

    # Fix: force UTF-8 — the default encoding is platform-dependent, and the
    # scraped titles are likely non-ASCII, which could raise UnicodeEncodeError
    # (e.g. under Windows' legacy gbk default).
    temp_output_file = open('blog_output.txt', 'w', encoding='utf-8')
    # NOTE(review): the file handle is never closed and the worker threads
    # loop forever, so the process never exits cleanly; consider daemon
    # threads plus Queue.task_done()/join() or sentinel values for shutdown.
    # One consumer thread parses pages and writes the results.
    for idx in range(1):
        t = threading.Thread(target=blog_consumer, args=(temp_html_queue, temp_output_file), name=f'consumer{idx}')
        t.start()