# -*- coding:utf-8 -*-

import time
from queue import Empty, Queue
from threading import Lock, Thread

import requests
from lxml import etree

requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告

"""
生产者消费者模型Queue
爬数据。首先，填充将待爬取的页面的url填充到队列。在有生产者请求数据parse_page。消费者监听到队列item_queue有数据进行保存
"""


# 生产者
# Producer: pulls URLs from page_queue, scrapes each one, and pushes the
# parsed item dict onto item_queue for a Consumer thread to save.
class Producer(Thread):
    def __init__(self, threadID, threadName, page_queue, item_queue):
        """
        :param threadID: numeric id of this worker (informational only)
        :param threadName: display name used in log output
        :param page_queue: Queue of URLs still to be fetched
        :param item_queue: Queue that receives parsed item dicts
        """
        super().__init__()
        self.threadID = threadID
        self.threadName = threadName
        self.page_queue = page_queue
        self.item_queue = item_queue

    def run(self):
        # Drain page_queue until it is exhausted.  Use non-blocking
        # get_nowait() instead of "if empty(): break" followed by a blocking
        # get(): with several producer threads, another thread can take the
        # last URL between the empty() check and get(), which would leave
        # this thread blocked forever.
        while True:
            try:
                url = self.page_queue.get_nowait()
            except Empty:
                break
            self.parse_page(self.threadName, url)

    # Fetch and parse one product page.
    def parse_page(self, threadName, url):
        """Fetch *url*, extract the product name, and enqueue the result.

        A network or HTTP failure is logged and skipped so one bad URL
        does not kill the whole producer thread.
        """
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'
        }
        try:
            # timeout so a dead host cannot hang the thread indefinitely
            res = requests.get(url, headers=headers, verify=False, timeout=10)
            res.raise_for_status()
        except requests.RequestException as exc:
            print("%s: Producer failed on %s (%s)" % (threadName, url, exc))
            return
        selector = etree.HTML(res.text)
        dict_data = dict()
        dict_data['商品名称'] = "".join(selector.xpath('//span[@id="lblProductName"]/text()')).replace("标准品", "").strip()
        self.item_queue.put(dict_data)
        print("%s: Producer Processing %s" % (threadName, url))


# 消费者
# Consumer: watches item_queue and saves each parsed item; exits once the
# producers are done (page_queue drained) and no items remain.
class Consumer(Thread):
    def __init__(self, threadID, threadName, page_queue, item_queue):
        """
        :param threadID: numeric id of this worker (informational only)
        :param threadName: display name used in log output
        :param page_queue: Queue of URLs (watched only to detect shutdown)
        :param item_queue: Queue of parsed item dicts to save
        """
        super().__init__()
        self.threadID = threadID
        self.threadName = threadName
        self.page_queue = page_queue
        self.item_queue = item_queue

    def run(self):
        # Use a bounded get() rather than a blocking one: with several
        # consumer threads, another consumer can take the last item between
        # the empty() check and get(), which would deadlock this thread.
        # On timeout we loop back and re-evaluate the exit condition.
        while True:
            if self.page_queue.empty() and self.item_queue.empty():
                break
            try:
                data = self.item_queue.get(timeout=1)
            except Empty:
                continue
            self.save_data(self.threadName, data)

    # Persist one parsed item (currently just prints it).
    def save_data(self, threadName, itemData):
        print("%s: Consumer Processing %s" % (threadName, str(itemData)))


def get_urls():
    """Return the list of product-page URLs to scrape."""
    return [
        'https://www.anpel.com.cn/products/1129895.html',
        'https://www.anpel.com.cn/products_1439341.html',
        'https://www.anpel.com.cn/products_971157.html',
        'https://www.anpel.com.cn/products/971756.html',
        'https://www.anpel.com.cn/products_6003.html',
        'https://www.anpel.com.cn/products_1354243.html',
        'https://www.anpel.com.cn/products/971703.html',
        'https://www.anpel.com.cn/products_1398391.html',
        'https://www.anpel.com.cn/products/6017.html',
        'https://www.anpel.com.cn/products/6011.html',
        'https://www.anpel.com.cn/products_6044.html',
        'https://www.anpel.com.cn/products_1426242.html',
        'https://www.anpel.com.cn/products/49209.html',
        'https://www.anpel.com.cn/products/6045.html',
        'https://www.anpel.com.cn/products/49210.html',
        'https://www.anpel.com.cn/products_6043.html',
        'https://www.anpel.com.cn/products/6035.html',
        'https://www.anpel.com.cn/products/6034.html',
        'https://www.anpel.com.cn/products/6005.html',
        'https://www.anpel.com.cn/products/48598.html',
        'https://www.anpel.com.cn/products/6032.html',
        'https://www.anpel.com.cn/products/1348782.html',
        'https://www.anpel.com.cn/products/5405.html',
        'https://www.anpel.com.cn/products_1439340.html',
        'https://www.anpel.com.cn/products/48638.html',
        'https://www.anpel.com.cn/products/971774.html',
        'https://www.anpel.com.cn/products/6022.html',
        'https://www.baidu.com/',
    ]


if __name__ == "__main__":
    threadsList = []  # 线程列表
    threadNum = 10  # 线程数量
    queueNum = 100  # 队列数量
    # 队列大小
    page_queue = Queue(queueNum)
    item_queue = Queue(queueNum)

    # 填充队列 将链接地址放入队列
    for u in get_urls():
        page_queue.put(u)

    # 生产者线程
    for n_p in range(1, threadNum + 1):
        t1 = Producer(n_p, "Thread-" + str(n_p), page_queue, item_queue)
        threadsList.append(t1)

    # 消费者线程
    for n_c in range(1, threadNum + 1):
        t2 = Consumer(n_c, "Thread-" + str(n_c), page_queue, item_queue)
        threadsList.append(t2)

    # 开启线程
    for t in threadsList:
        t.start()

    # 等待所有线程完成
    for t in threadsList:
        t.join()

    threadsList.clear()
    print("退出主线程")


