# -*- coding: utf-8 -*-
# @Time : 2023/3/28 0028 11:25
# @Author : 菜鸟王小二
# @File : 39_爬取唯品会数据_selenium
# @Project : python爬虫
import time

from selenium.webdriver import Keys
from selenium.webdriver.chrome.service import Service
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_helper import get_webdriver
import pandas as pd


def crawlVip(crawl_data, crawl_page):
    """Search vip.com for *crawl_data* with Selenium and scrape *crawl_page* result pages.

    For each page, collects the product title, image URL, sale price, market
    price and discount, then appends the rows to 'seleniumJd.xlsx' (the file
    is created automatically if it does not exist yet).

    :param crawl_data: search keyword typed into the site's search box
    :param crawl_page: number of result pages to scrape (int >= 1)
    """
    # path = Service('E:\chromedriver\chromedriver.exe')
    # driver = webdriver.Chrome(service=path)

    # webdriver_helper==1.0.1 (free version) provides get_webdriver(), which
    # auto-detects the browser version and downloads a matching driver.
    with get_webdriver() as driver:
        driver.get('https://www.vip.com/')
        # 窗口最大化
        # driver.maximize_window()
        # Implicit wait: every find_element/find_elements retries for up to 5 s.
        driver.implicitly_wait(5)

        element_input = driver.find_element(By.XPATH, '//input[@class="c-search-input  J-search-input"]')
        element_input.send_keys(crawl_data)
        # Wait 2 s before pressing Enter, otherwise the page may not have
        # registered the input yet and the search will not trigger.
        time.sleep(2)
        element_input.send_keys(Keys.ENTER)
        time.sleep(1)

        # Loop over the requested number of result pages.
        for i in range(crawl_page):
            # Scroll to the bottom in two steps so lazily loaded items render.
            driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            time.sleep(2)
            driver.execute_script('window.scrollTo(document.body.scrollHeight/2,document.body.scrollHeight)')
            time.sleep(2)

            img_list = driver.find_elements(By.XPATH, '//*[@id="J_searchCatList"]/div/a/div[1]/div[1]')
            # Titles come from the img alt attribute; image URLs from the
            # lazy-load attribute 'data-original' (protocol-relative, so
            # prefix 'https:').
            alt_list = [img.find_element(By.TAG_NAME, 'img').get_attribute('alt') for img in img_list]
            src_list = ['https:' + img.find_element(By.TAG_NAME, 'img').get_attribute('data-original') for img in
                        img_list]

            price_list, marketPrice_list, saleDiscount_list = [], [], []
            data_list = driver.find_elements(By.XPATH,
                                             '//div[@class="c-goods-item  J-goods-item c-goods-item--auto-width"]/a/div[2]/div[1]')
            for data in data_list:
                price_list.append(data.find_element(By.CLASS_NAME, 'c-goods-item__sale-price').text)
                marketPrice_list.append(data.find_element(By.CLASS_NAME, 'c-goods-item__market-price').text)
                saleDiscount_list.append(data.find_element(By.CLASS_NAME, 'c-goods-item__discount').text)
            print(price_list, marketPrice_list, saleDiscount_list)

            df = pd.DataFrame({
                '标题': alt_list,
                '链接': src_list,
                '价格': price_list,
                '原价': marketPrice_list,
                '折扣': saleDiscount_list
            })
            try:
                # Append to the existing workbook:
                # pd.concat with axis=0 stacks rows (axis=1 would join columns).
                original_data = pd.read_excel('seleniumJd.xlsx')
                save_data = pd.concat([original_data, df], axis=0)
            except FileNotFoundError:
                # BUGFIX: the original code only printed an error here and the
                # scraped page was lost. Start a fresh workbook from this
                # page's data instead of requiring a pre-existing file.
                save_data = df
            save_data.to_excel('seleniumJd.xlsx', index=False, sheet_name='selenium自动爬取京东数据')

            # Save happens before navigation; click "next page" unless this
            # was the last requested page.
            if i != crawl_page - 1:
                driver.find_element(By.XPATH, '//div[@id="J_pagingCt"]//a[@class="cat-paging-next"]').click()


if __name__ == '__main__':
    # Interactive entry point: ask for the search keyword and the number of
    # result pages, then run the scraper.
    keyword = input("请输入你要爬取的数据：")
    page_count = int(input('请输入你要爬取的页数：'))
    crawlVip(keyword, page_count)
