# _*_ coding : utf-8 _*_
# @Time : 2023/3/21 0021 20:22
# @Author : 菜鸟王小二
# @File : 37_爬取京东数据
# @Project : python爬虫
import time
import pandas as pd

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys

# NOTE: pandas.DataFrame.to_excel requires the openpyxl package; install it if the export raises an ImportError.

def jdSpider(text, page_size,
             driver_path='E:/chromedriver/chromedriver.exe',
             output_file='1.xlsx'):
    """Scrape JD.com search results and save them to an Excel file.

    Searches JD.com for *text*, walks through *page_size* result pages,
    and collects price, title, comment count, and shop name for each
    product into *output_file*.

    :param text: search keyword to type into the JD search box
    :param page_size: number of result pages to scrape
    :param driver_path: path to the chromedriver executable
    :param output_file: path of the Excel file to write
    """
    service = Service(driver_path)
    driver = webdriver.Chrome(service=service)
    try:
        driver.get('https://www.jd.com')
        time.sleep(3)

        search_box = driver.find_element(By.ID, 'key')
        search_box.send_keys(text)
        # Pressing ENTER submits the search instead of clicking the button.
        search_box.send_keys(Keys.ENTER)

        prices, names, commits, shops = [], [], [], []
        for page in range(page_size):
            # Scroll to the bottom so lazily-loaded items are rendered
            # before we read the goods list.
            driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            time.sleep(3)

            good_list = driver.find_elements(By.XPATH, '//*[@id="J_goodsList"]/ul/li')
            for good in good_list:
                prices.append(good.find_element(By.CLASS_NAME, 'p-price').text)
                names.append(good.find_element(By.CLASS_NAME, 'p-name').text)
                commits.append(good.find_element(By.CLASS_NAME, 'p-commit').text)
                shops.append(good.find_element(By.CLASS_NAME, 'p-shop').text)

            # Only advance when another page is still needed; clicking
            # "next" after the last requested page can fail if the button
            # is absent on the final results page.
            if page < page_size - 1:
                driver.find_element(By.CLASS_NAME, 'pn-next').click()
                time.sleep(3)

        # Persist the scraped data; index=False avoids an extra unnamed
        # index column in the spreadsheet.
        df = pd.DataFrame({
            '价格': prices,
            '标题': names,
            '评论': commits,
            '商家': shops
        })
        df.to_excel(output_file, index=False)
    finally:
        # Always release the browser process, even if scraping fails.
        driver.quit()



if __name__ == '__main__':
    # Prompt for the search keyword and how many result pages to scrape,
    # then run the scraper.
    keyword = input('请输入你要爬取的数据：')
    pages = int(input('请输入你要爬取的页数：'))
    jdSpider(keyword, pages)