from queue import Empty, Queue
from threading import Thread

import requests
from fake_useragent import UserAgent
from lxml import etree


# Multithreaded crawler example: crawler threads fetch pages, parser threads extract text.

# 爬虫线程类
# Crawler worker thread
class CrawlerInfo(Thread):
    """Worker thread that pulls URLs off *url_queue*, fetches each page,
    and pushes the body of every HTTP-200 response onto *html_queue*."""

    def __init__(self, url_queue, html_queue):
        Thread.__init__(self)
        self.url_queue = url_queue  # Queue[str]: URLs still to fetch
        self.html_queue = html_queue  # Queue[str]: fetched HTML documents

    def run(self):
        headers = {
            "User-Agent": UserAgent().chrome
        }
        while True:
            # get_nowait() + Empty closes the empty()/get() race: with
            # several workers, empty() can report False and another thread
            # then drains the queue, leaving a blocking get() stuck forever.
            try:
                url = self.url_queue.get_nowait()
            except Empty:
                break
            try:
                # Timeout so one stalled server cannot hang the worker;
                # network failures skip the page instead of killing the thread.
                response = requests.get(url, headers=headers, timeout=10)
            except requests.RequestException:
                continue
            if response.status_code == 200:
                self.html_queue.put(response.text)


# Parser worker thread
class ParseInfo(Thread):
    """Worker thread that pulls HTML documents off *html_queue*, extracts
    the text of the first <span> in each div.content node, and appends it
    to duanzi.txt."""

    def __init__(self, html_queue):
        Thread.__init__(self)
        self.html_queue = html_queue  # Queue[str]: HTML documents to parse

    def run(self):
        # Open the output file once per thread instead of re-opening it
        # for every document pulled off the queue.
        with open("duanzi.txt", "a", encoding="utf-8") as f:
            while True:
                # get_nowait() + Empty closes the empty()/get() race between
                # parser threads (empty() followed by get() is not atomic).
                try:
                    html = etree.HTML(self.html_queue.get_nowait())
                except Empty:
                    break
                span_contents = html.xpath("//div[@class='content']/span[1]")
                for span in span_contents:
                    info = span.xpath("string(.)")
                    f.write(info)


if __name__ == '__main__':
    # Queue of page URLs waiting to be crawled.
    url_queue = Queue()
    # Queue of fetched HTML documents waiting to be parsed.
    html_queue = Queue()

    base_url = "https://www.qiushibaike.com/text/page/"
    for page in range(1, 16):
        url_queue.put(f"{base_url}{page}")

    # Start three crawler threads and wait until url_queue is drained.
    crawlers = [CrawlerInfo(url_queue, html_queue) for _ in range(3)]
    for worker in crawlers:
        worker.start()
    for worker in crawlers:
        worker.join()

    # Start three parser threads and wait until html_queue is drained.
    parsers = [ParseInfo(html_queue) for _ in range(3)]
    for worker in parsers:
        worker.start()
    for worker in parsers:
        worker.join()
