# coding: utf-8
# 文件名称: 冷笑话_spider_多线程练习.py
# 创建时间: 2021/6/25 16:38
import csv
import re
import threading
from queue import Empty, Queue

import requests
from lxml import etree


# Producer: downloads and parses list pages, feeding parsed jokes into csv_queue
class Procuder(threading.Thread):
    """Producer thread.

    Pulls page URLs from ``page_queue``, downloads and parses each page,
    and pushes one ``(title, content)`` tuple per joke onto ``csv_queue``
    for the consumer threads to persist.
    """

    # Shared request headers; a browser UA avoids trivial bot blocking.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36 Edg/91.0.864.48'
    }

    def __init__(self, page_queue, csv_queue, *args, **kwargs):
        """
        :param page_queue: Queue of page URLs still to be downloaded.
        :param csv_queue: Queue receiving (title, content) tuples.
        """
        super(Procuder, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.csv_queue = csv_queue

    def run(self):
        # get_nowait()/Empty avoids the race between a separate empty()
        # check and get(): another producer could drain the queue in between.
        while True:
            try:
                url = self.page_queue.get_nowait()
            except Empty:
                break
            self.parse_page(url)

    def parse_page(self, url):
        """Download *url*, extract every joke on it, and enqueue the results."""
        result = requests.get(url, headers=self.headers)
        html = etree.HTML(result.text)
        for li in html.xpath('//li[@class="article-summary"]'):
            title = li.xpath('.//span[@class="article-title"]//text()')[0]
            contents = li.xpath('.//div[@class="summary-text"]//text()')
            # Join the text fragments, then strip stray whitespace/tabs.
            content = ''.join(contents).strip().replace('\t', '')
            self.csv_queue.put((title, content))
        # URL looks like .../lengxiaohua/<page>.html — recover the page number.
        page = url.split('.')[-2].split('/')[-1]
        print('-' * 30 + f"第{page}页下载完成！" + '-' * 30)




# Consumer: drains parsed jokes from csv_queue and appends them to the CSV file
class Consumer(threading.Thread):
    """Consumer thread.

    Takes ``(title, content)`` tuples off ``csv_queue`` and appends them to
    the shared CSV writer.  Stops when both queues are empty or when no new
    item arrives within the get() timeout.
    """

    def __init__(self, page_queue, csv_queue, writer, gLock, *args, **kwargs):
        """
        :param page_queue: Producers' URL queue — watched only to decide when to stop.
        :param csv_queue: Queue of (title, content) tuples to persist.
        :param writer: Shared csv.writer for the output file.
        :param gLock: Lock serializing access to *writer* across consumers.
        """
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.csv_queue = csv_queue
        self.lock = gLock
        self.writer = writer

    def run(self):
        while True:
            # Both queues empty => producers are done and everything is saved.
            if self.csv_queue.empty() and self.page_queue.empty():
                break
            try:
                title, content = self.csv_queue.get(timeout=40)
            except Empty:
                # Catch only the queue timeout — a bare except would also
                # swallow real bugs (e.g. a malformed tuple) silently.
                break
            # csv.writer is not thread-safe: guard the shared writer.
            with self.lock:
                self.writer.writerow((title, content))
            print('数据追加成功！')



def main():
    """Queue up the 10 list pages, start producer/consumer threads,
    and wait for completion before closing the output file."""
    page_queue = Queue(10)
    csv_queue = Queue(1000)

    gLock = threading.Lock()
    fb = open('xiaohua.csv', 'a', encoding='utf-8-sig', newline='')
    writer = csv.writer(fb)
    writer.writerow(('标题', '内容'))

    for i in range(1, 11):
        page_queue.put(f'https://xiaohua.zol.com.cn/lengxiaohua/{i}.html')

    # Keep references so we can join() every worker before closing the file.
    threads = []
    for _ in range(5):
        t = Procuder(page_queue, csv_queue)
        t.start()
        threads.append(t)

    for _ in range(5):
        t = Consumer(page_queue, csv_queue, writer, gLock)
        t.start()
        threads.append(t)

    # Without joining, main() returns immediately and the buffered CSV file
    # is never flushed/closed — rows could be silently lost.
    for t in threads:
        t.join()
    fb.close()


if __name__ == '__main__':
    main()
