# -*- coding:utf-8 -*-

import csv
import time
from queue import Empty, Queue
from threading import Lock, Thread

import requests
from lxml import etree

from utils.HttpUtils import HttpUtils
from 生产消费者模式.GetDetail import GetDetail

requests.packages.urllib3.disable_warnings()  # suppress HTTPS certificate warnings

"""
生产者消费者模型Queue
爬数据。首先，填充将待爬取的页面的url填充到队列。在有生产者请求数据parse_page。消费者监听到队列item_queue有数据进行保存
"""


# Producer
class Producer(Thread):
    """Producer thread.

    Pulls URLs off ``page_queue``, fetches and parses each one through the
    project helper ``GetDetail.get_contents``, and puts the resulting dict
    onto ``item_queue`` for the consumers to save.
    """

    def __init__(self, threadID, threadName, page_queue, item_queue):
        super().__init__()
        self.threadID = threadID  # numeric id (kept for parity with Consumer)
        self.threadName = threadName  # label used in the progress log line
        self.page_queue = page_queue  # queue of URLs still to crawl
        self.item_queue = item_queue  # queue of parsed items awaiting save

    def run(self):
        # Drain the page queue.  get_nowait() + Empty replaces the original
        # empty()/get() pair, which was racy: another producer could steal
        # the last URL between the check and the blocking get(), hanging
        # this thread forever.
        while True:
            try:
                url = self.page_queue.get_nowait()
            except Empty:
                break
            self.parse_page(self.threadName, url)

    # parse one page
    def parse_page(self, threadName, url):
        """Fetch/parse *url* and enqueue the resulting data dict.

        The original pre-initialised ``dict_data = dict()`` and immediately
        overwrote it; that dead assignment has been removed.
        """
        dict_data = GetDetail.get_contents(url)  # project helper; presumably returns a dict — TODO confirm
        self.item_queue.put(dict_data)
        print("%s: Producer Processing %s" % (threadName, url))


# Consumer
class Consumer(Thread):
    """Consumer thread.

    Watches ``item_queue`` and saves every item that arrives via the project
    helper ``HttpUtils.dict_to_csv``.  Exits once both queues are drained.
    """

    def __init__(self, threadID, threadName, page_queue, item_queue):
        super().__init__()
        self.threadID = threadID  # numeric id (kept for parity with Producer)
        self.threadName = threadName  # label used in the progress log line
        self.page_queue = page_queue  # observed only to decide when to stop
        self.item_queue = item_queue  # queue of parsed items to save

    def run(self):
        # The original checked both queues for emptiness and then called the
        # blocking get(): if a sibling consumer grabbed the last item in
        # between, this thread blocked forever and the program never exited.
        # A timed get() plus a re-check of the producers' queue makes the
        # shutdown reliable.
        while True:
            try:
                data = self.item_queue.get(timeout=1)
            except Empty:
                if self.page_queue.empty():
                    # Nothing left to produce and nothing buffered: done.
                    break
                continue  # producers still working; keep waiting
            self.save_data(self.threadName, data)

    # save one item
    def save_data(self, threadName, itemData):
        """Persist one parsed item to CSV via the project helper."""
        HttpUtils.dict_to_csv("阿尔塔_无", itemData)
        print("%s: Consumer Processing %s" % (threadName, str(itemData)))


if __name__ == "__main__":
    threadsList = []  # 线程列表
    threadNum = 10  # 线程数量
    queueNum = 10000  # 队列数量
    # 队列大小
    page_queue = Queue(queueNum)
    item_queue = Queue(queueNum)
    # 查询列表
    list_urls = []
    list_urls = GetDetail.get_urls()

    # 填充队列 将链接地址放入队列
    for u in list_urls:
        page_queue.put(u)

    # 生产者线程
    for n_p in range(1, threadNum + 1):
        t1 = Producer(n_p, "Thread-" + str(n_p), page_queue, item_queue)
        threadsList.append(t1)

    # 消费者线程
    for n_c in range(1, threadNum + 1):
        t2 = Consumer(n_c, "Thread-" + str(n_c), page_queue, item_queue)
        threadsList.append(t2)

    # 开启线程
    for t in threadsList:
        t.start()

    # 等待所有线程完成
    for t in threadsList:
        t.join()

    threadsList.clear()
    print("退出主线程")


