from selenium import webdriver
import time
import json
from selenium.webdriver.common.by import By
class JdSpider(object):
    """Crawl JD.com search results for 'Python网络爬虫' books with headless Chrome.

    Workflow: search from the home page, scrape every result page while
    clicking "next", then append all records to jdbook.json.
    """

    def __init__(self):
        self.url = 'https://www.jd.com/'
        self.options = webdriver.ChromeOptions()
        self.options.add_argument('--headless')  # run Chrome without a visible window
        self.browser = webdriver.Chrome(options=self.options)
        self.i = 0  # running count of scraped products
        self.list_book = []  # accumulates every scraped book record

    def get_html(self):
        """Open the JD home page, enter the search keyword and submit the search.

        NOTE: the selectors target the search box and button of the JD *home*
        page, not the result page.
        """
        self.browser.get(self.url)
        self.browser.find_element(By.XPATH, '//*[@id="key"]').send_keys('Python网络爬虫')
        self.browser.find_element(By.XPATH, "//*[@class='form']/button").click()

    def get_data(self):
        """Scroll the current result page to the bottom and extract every product."""
        # Scrolling to the bottom triggers lazy loading of the remaining items.
        self.browser.execute_script(
            'window.scrollTo(0,document.body.scrollHeight)'
        )
        # Give the lazily loaded elements time to render.
        time.sleep(1)
        # One <li> per product inside the goods list container.
        li_list = self.browser.find_elements(By.XPATH, '//*[@id="J_goodsList"]/ul/li')
        for li in li_list:
            item = {
                'name': li.find_element(By.XPATH, './/div[@class="p-name"]/a/em').text.strip(),
                'price': li.find_element(By.XPATH, './/div[@class="p-price"]/strong/i').text.strip(),
                'count': li.find_element(By.XPATH, './/div[@class="p-commit"]/strong/a').text.strip(),
                # BUG FIX: the original used an absolute XPath ('//div[...]'),
                # which always matched the first shop on the whole page rather
                # than the shop belonging to this <li>. './/' scopes it to li.
                'shop': li.find_element(By.XPATH, './/div[@class="p-shopnum"]/a').text.strip(),
            }
            print(item)
            self.list_book.append(item)  # keep the record for the final save
            self.i += 1  # one more product counted

    def save_data(self, items):
        """Append *items* to jdbook.json.

        :param items: a list of dicts (each dumped as one JSON line with
            non-ASCII characters preserved) or a plain string written
            verbatim as one line.
        """
        # 'a' mode: successive calls (records, then the summary line) accumulate.
        with open("jdbook.json", 'a', encoding='utf-8') as f:
            if isinstance(items, list):
                for item in items:
                    f.write(json.dumps(item, ensure_ascii=False) + '\n')
            else:
                f.write(items + '\n')
            # no explicit close needed: the `with` block closes the file

    def run(self):
        """Drive the crawl: search, page through every result page, save, quit."""
        self.get_html()
        while True:
            self.get_data()
            # 'pn-next disabled' only appears in the page source on the last
            # page, so its absence means a clickable "next" button exists.
            if self.browser.page_source.find('pn-next disabled') == -1:
                self.browser.find_element(By.CLASS_NAME, 'pn-next').click()
                time.sleep(1)  # give the next page time to load
            else:
                print('数量', self.i)
                break
        self.save_data(self.list_book)
        self.save_data("共查询到%s本书" % (str(self.i)))
        self.browser.quit()  # always release the browser process


if __name__ == '__main__':
    # Script entry point: build the spider and start the crawl.
    JdSpider().run()