# -*- coding: utf-8 -*-
"""
@Author: thekernel
@Date: 2020/5/15 14:36
@Description: 使用 selenium 进行自动翻页
"""
from lxml import etree
from selenium import webdriver
from time import sleep
from fake_useragent import UserAgent
import random
import time


def spider(url, driver_path="/Users/thekernel/Code/PythonWorkspace/PythonLib/geckodriver"):
    """Crawl every result page of a JD search, saving each page via save_page().

    Opens the URL in headless Firefox, scrolls to the bottom so lazy-loaded
    items render, then repeatedly clicks the "next page" button until the
    current page number equals the total page count.

    :param url: JD search-result URL to start from.
    :param driver_path: path to the geckodriver executable (defaults to the
        original hard-coded location).
    """
    options = webdriver.FirefoxOptions()
    options.add_argument("--headless")
    # FIX: "user-agent=..." is Chrome-only syntax and was silently ignored by
    # Firefox; the UA must be set through a Firefox preference instead.
    options.set_preference("general.useragent.override", UserAgent().random)
    firefox = webdriver.Firefox(options=options, executable_path=driver_path)

    scroll_end_script = "window.scrollTo(0,document.body.scrollHeight)"  # jump to page bottom
    try:
        firefox.get(url)
        firefox.execute_script(scroll_end_script)
        sleep(3 + random.random())  # wait for data to load; random jitter to dodge anti-bot checks

        # Total page count is printed once in the pager, so read it before looping.
        tree = etree.HTML(firefox.page_source)
        total_page = tree.xpath("//*[@id=\"J_bottomPage\"]/span[2]/em[1]/b/text()")[0]

        while True:
            tree = etree.HTML(firefox.page_source)
            current_page = tree.xpath("//*[@id=\"J_bottomPage\"]/span[1]/a[@class=\"curr\"]/text()")[0]
            print("正在爬取 第{}页 共{}页".format(current_page, total_page))
            save_page(tree)

            # Both values come from xpath text(), so this is a string comparison.
            if current_page == total_page:
                break
            firefox.find_element_by_class_name("pn-next").click()
            firefox.execute_script(scroll_end_script)

            time.sleep(10 + random.random())  # long pause between pages to stay under rate limits
    finally:
        # FIX: the driver was never quit, leaking a browser process per call.
        firefox.quit()


def save_page(tree, out_path='data/auto_selenium2.txt'):
    """Extract product rows from a parsed JD result page and append them to a file.

    Each line written is: name, price, link, shop — tab-separated, one product
    per line. NOTE: zip() silently truncates if the four xpath result lists
    have different lengths, so a product missing e.g. a shop link is dropped.

    :param tree: lxml element tree of a JD search-result page (any object with
        a compatible ``xpath`` method works).
    :param out_path: file to append to (defaults to the original hard-coded path).
    """
    prices = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[2]/strong/i/text()')
    names = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[3]/a/em')
    hrefs = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[1]/a/@href')
    shops = tree.xpath('//div[@id="J_goodsList"]/ul/li/div/div[5]/span/a/text()')
    with open(out_path, 'a', encoding="utf-8") as f:
        for name, price, href, shop in zip(names, prices, hrefs, shops):
            # FIX: the original used writelines() on a single string, which
            # iterates it character by character; write() is the correct call.
            f.write(
                "商品：" + name.xpath('string(.)') + "\t" +
                "价格：" + price + "\t" +
                "链接：" + "https:" + href + "\t" +
                "店铺：" + shop + "\n"
            )


if __name__ == "__main__":
    # Build the JD search URL; spaces in the query must be percent-encoded.
    search_term = "airpods pro"
    url = "https://search.jd.com/Search?keyword=" + search_term.replace(" ", "%20")

    started_at = time.time()
    spider(url)
    elapsed = time.time() - started_at

    print("爬虫运行时间%.4f秒" % elapsed)
