# -*- coding: utf-8 -*-
"""
@Author: thekernel
@Date: 2020/5/19 08:36
@Description: Thead 类运行 多线程爬虫
"""
import time
from queue import Empty, Queue
from threading import Thread
from urllib.parse import quote

import requests
from fake_useragent import UserAgent
from lxml import etree


class Spider(Thread):
    """Crawler worker thread.

    Pulls URLs from a shared URL queue until it is drained and pushes the
    HTML body of every successful (HTTP 200) response onto a shared HTML
    queue for the parser threads to consume.
    """

    def __init__(self, queue1, queue2):
        super().__init__()
        self.url_queue = queue1    # URLs still waiting to be fetched
        self.html_queue = queue2   # fetched HTML documents

    def run(self):
        # One random User-Agent per worker thread to look less bot-like.
        headers = {
            "User-Agent": UserAgent().random
        }

        while True:
            # get_nowait() closes the check-then-get race of the original
            # empty()/get() pair: with several workers, a sibling thread can
            # drain the queue between the two calls and leave this thread
            # blocked forever on a plain get().
            try:
                url = self.url_queue.get_nowait()
            except Empty:
                break  # queue drained — worker is done
            try:
                # Bounded timeout so a stalled server cannot hang the worker.
                response = requests.get(url, headers=headers, timeout=10)
            except requests.RequestException:
                # One failed request must not kill the whole worker thread.
                continue
            if response.status_code == 200:  # request succeeded
                self.html_queue.put(response.text)


class ParseHTML(Thread):
    """Parser worker thread.

    Pulls raw HTML pages from a shared queue, scrapes name, price, link and
    shop for every product in the JD search-result list, and appends one
    tab-separated line per product to data/thread_multi_threading.txt.
    """

    def __init__(self, queue1):
        super().__init__()
        self.html_queue = queue1  # HTML documents waiting to be parsed

    def run(self):
        while True:
            # get_nowait() closes the empty()/get() race window: another
            # parser thread could take the last item between the two calls
            # and leave this thread blocked forever on a plain get().
            try:
                html = self.html_queue.get_nowait()
            except Empty:
                break  # queue drained — worker is done
            tree = etree.HTML(html)
            prices = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[2]/strong/i/text()')
            names = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[3]/a/em')
            hrefs = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[1]/a/@href')
            shops = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[5]/span/a/text()')
            with open('data/thread_multi_threading.txt', 'a', encoding="utf-8") as f:
                for name, price, href, shop in zip(names, prices, hrefs, shops):
                    # string(.) flattens the <em> element (which may hold
                    # keyword-highlight sub-tags) into plain text.
                    # f.write, not f.writelines: writelines on a single str
                    # iterates it character by character.
                    f.write(
                        "商品：" + name.xpath('string(.)') + "\t" +
                        "价格：" + price + "\t" +
                        "链接：" + "https:" + href + "\t" +
                        "店铺：" + shop + "\n"
                    )


if __name__ == '__main__':
    keyword = "mac pro"

    start = time.time()
    url_queue = Queue()   # search-result URLs waiting to be crawled
    html_queue = Queue()  # fetched HTML documents waiting to be parsed

    # quote() percent-encodes every reserved character in the keyword
    # (space -> %20 and more), not only spaces as the original
    # str.replace(" ", "%20") did; for "mac pro" the URL is identical.
    base_url = "https://search.jd.com/Search?keyword=" + quote(keyword) + "&page={}"
    for page in range(1, 100):
        url_queue.put(base_url.format(page))

    # Launch the crawler threads.
    crawl_list = []
    for _ in range(3):  # three crawler threads
        crawler = Spider(url_queue, html_queue)
        crawl_list.append(crawler)
        crawler.start()

    for crawler in crawl_list:
        crawler.join()  # wait until every page has been fetched

    # Crawl and parse phases are deliberately sequential: parsers start
    # only after all crawlers have finished.
    parser_list = []
    for _ in range(3):  # three parser threads
        parser = ParseHTML(html_queue)
        parser_list.append(parser)
        parser.start()

    for parser in parser_list:
        parser.join()  # wait until all pages have been parsed

    end = time.time()
    print("爬虫运行时间为：%.4f秒" % (end - start))
