'''
@Author: your name
@Date: 2020-03-21 23:23:01
@LastEditTime: 2020-03-22 10:30:29
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: \giee\learn_python\爬虫\爬虫基础\练习项目\多线程.py
'''


from threading import Thread
from queue import Queue  # 线程池
import requests
from fake_useragent import UserAgent
from lxml import etree



class CrawInfo(Thread):
    """Crawler thread: pulls URL strings off ``url_queue``, downloads each
    page, and pushes the HTML text of successful responses onto
    ``html_queue`` for a parser thread to consume."""

    def __init__(self, url_queue, html_queue):
        """Store the shared work queues.

        url_queue  -- Queue of URL strings still to be downloaded
        html_queue -- Queue receiving the HTML body of each fetched page
        """
        Thread.__init__(self)
        self.url_queue = url_queue
        self.html_queue = html_queue

    def run(self):
        # Random Chrome User-Agent to reduce the chance of being blocked.
        headers = {
            "User-Agent": UserAgent().chrome
        }

        # Drain the URL queue. NOTE(review): empty()-then-get() is racy if
        # several crawler threads share the queue; get_nowait() guarded by
        # queue.Empty would be fully safe. Adequate for a pre-filled queue.
        while not self.url_queue.empty():
            # BUG FIX: the original passed the Queue object itself to
            # requests.get(); we must take a URL string off the queue.
            url = self.url_queue.get()
            response = requests.get(url, headers=headers)
            if response.status_code == 200:  # keep only successful fetches
                self.html_queue.put(response.text)


class ParseInfo(Thread):
    """Parser thread: drains ``html_queue`` and runs an XPath query to
    locate the content nodes of each downloaded page."""

    def __init__(self, html_queue):
        """html_queue -- Queue holding raw HTML strings to parse."""
        Thread.__init__(self)
        self.html_queue = html_queue

    def run(self):
        # Keep consuming pages until the queue is drained.
        while not self.html_queue.empty():
            page = self.html_queue.get()
            tree = etree.HTML(page)
            # NOTE(review): the matched nodes are currently discarded;
            # presumably they should be printed or stored downstream.
            contents = tree.xpath('//dd[@class="content"]')



if __name__ == "__main__":

    # 存储url的线程池
    url_queue = Queue()
    # 存储网页内容的线程池
    html_queue = Queue()
    base_url = "http://qiushidabaike.com/index_{}.html"
    for i in range(1, 10):
        new_url = base_url.format(i)
        url_queue.put(new_url)
    # 创建一个爬虫
    # for i in range(3):
    crawl_1 = CrawInfo(url_queue)
    crawl_1.start()