# -*- coding: utf-8 -*-
"""
@Author: thekernel
@Date: 2020/5/15 13:09
@Description: Multiprocess crawler using multiprocessing.Pool (processes, not threads)
"""
from lxml import etree
from selenium import webdriver
from time import sleep
from fake_useragent import UserAgent
import random
import time

from multiprocessing import Pool


def spider(url):
    """Fetch one JD search-result page with headless Firefox and persist its products.

    Launches a fresh headless Firefox with a random user agent, loads *url*,
    scrolls to the bottom so lazily-loaded items render, then hands the parsed
    page to save_page().

    :param url: full search-result URL to scrape.
    """
    firefox_driver = "/Users/thekernel/Code/PythonWorkspace/PythonLib/geckodriver"
    options = webdriver.FirefoxOptions()
    options.add_argument("--headless")
    # Random UA per request so repeated headless sessions look less uniform.
    options.add_argument("user-agent=" + UserAgent().random)
    firefox = webdriver.Firefox(options=options, executable_path=firefox_driver)

    try:
        firefox.get(url)
        js = "window.scrollTo(0,document.body.scrollHeight)"  # jump to page bottom
        firefox.execute_script(js)
        # Wait ~3s for AJAX content to load; random jitter as mild anti-anti-crawler.
        sleep(3 + random.random())
        html = firefox.page_source
    finally:
        # Always close the browser — without this, every crawled URL leaks a
        # Firefox + geckodriver process (100 of them over a full run).
        firefox.quit()

    tree = etree.HTML(html)
    save_page(tree)


def save_page(tree, path='data/airpods_pro.txt'):
    """Extract product rows from a parsed JD search page and append them to a file.

    :param tree: parsed HTML tree (lxml) of a rendered JD search-result page.
    :param path: output text file, appended to; defaults to the original
        hard-coded location for backward compatibility.
    """
    # The div[N] positions follow JD's search-result card layout.
    prices = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[2]/strong/i/text()')
    names = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[3]/a/em')
    hrefs = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[1]/a/@href')
    shops = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[5]/span/a/text()')
    with open(path, 'a', encoding="utf-8") as f:
        for name, price, href, shop in zip(names, prices, hrefs, shops):
            # <em> may contain nested highlight tags, so take its full string value.
            # f.write, not f.writelines: the argument is a single string, and
            # writelines would iterate it character-by-character semantics aside,
            # write is the correct call for one string.
            f.write(
                "商品：" + name.xpath('string(.)') + "\t" +
                "价格：" + price + "\t" +
                "链接：" + "https:" + href + "\t" +
                "店铺：" + shop + "\n"
            )


if __name__ == "__main__":
    keyword = "airpods pro"
    url = "https://search.jd.com/Search?keyword=" + keyword.replace(" ", "%20") + "&page={}"

    start = time.time()
    args = [url.format(i) for i in range(1, 101)]
    pool = Pool(processes=8)
    pool.map(spider, args)
    end = time.time()

    print("爬虫运行时间%.4f秒" % (end - start))
