import json
import time
from lxml import etree
from selenium import webdriver
import requests
from selenium.webdriver.common.by import By
import re
import pymysql



def selenium_w():
    """Open jd.com, search for '创维电视', scroll the result page so the
    lazily-loaded items render, then pass the page HTML to clear_data().

    Pure I/O: drives a local Chrome instance; returns None.
    """
    options = webdriver.ChromeOptions()
    # Hide the "browser is being controlled by automated software" banner.
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    options.add_argument('--incognito')
    # Raw string so the Windows backslashes are never treated as escapes.
    driver = webdriver.Chrome(
        executable_path=r'D:\BaiduNetdiskDownload\爬虫\代码\接单考核\chromedriver.exe',
        options=options)
    try:
        driver.get("https://www.jd.com/")
        # Implicit wait covers the element lookups below (it is a lookup
        # timeout, NOT a pause).
        driver.implicitly_wait(2)
        # Type the product name and submit the search.
        driver.find_element(By.ID, "key").send_keys('创维电视')
        driver.find_element(By.CLASS_NAME, 'button').click()

        driver.maximize_window()
        # The result list lazy-loads as you scroll, so scroll in steps.
        # BUGFIX: the original called implicitly_wait() here, which does not
        # pause execution at all; time.sleep() actually waits for content.
        for _ in range(10):
            driver.execute_script("window.scrollBy(0,600)")
            time.sleep(1)
        data = driver.page_source
    finally:
        # BUGFIX: quit() in a finally so the browser is released even on
        # error; quit() closes all windows, so the old close() was redundant.
        driver.quit()
    clear_data(data)
# 定义清洗数据的函数
def clear_data(data):
    """Extract product links from a JD search-result page, then visit each
    product's detail page and scrape its fields.

    Parameters
    ----------
    data : str
        HTML source of the search-result page.

    For every product this collects and prints: store name, official
    product name, page product name, brand, item number, first main image
    URL, JD price, promotions and the cumulative review count.
    """
    # Parse the result-page HTML into an xpath-queryable tree.
    html = etree.HTML(data)
    # Each search-result <li> holds one protocol-relative product link.
    urls = html.xpath('//*[@id="J_goodsList"]/ul/li[*]/div/div[1]/a/@href')

    # Chrome options are loop-invariant: build them once, not per product.
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')

    for url in urls:
        url = 'https:' + url
        driver = webdriver.Chrome(
            executable_path=r'D:\BaiduNetdiskDownload\爬虫\代码\接单考核\chromedriver.exe',
            options=options)
        try:
            driver.get(url)
            driver.implicitly_wait(10)
            driver.maximize_window()
            # Scroll stepwise so lazily-loaded sections render.
            # time.sleep() actually pauses (implicitly_wait does not).
            for _ in range(10):
                driver.execute_script("window.scrollBy(0,600)")
                time.sleep(1)
            page = driver.page_source
        finally:
            # BUGFIX: the original never released the driver inside the
            # loop, leaking one Chrome process per product scraped.
            driver.quit()

        html1 = etree.HTML(page)
        # Store name
        store = html1.xpath('//*[@id="crumb-wrap"]/div/div[2]/div[2]/div[1]/div/a/text()')
        print(store)
        # Official product name
        real_name = html1.xpath('//*[@id="detail"]/div[2]/div[1]/div[1]/ul[2]/li[1]/@title')
        print(real_name)
        # Page product name: take the first text node and trim whitespace.
        # BUGFIX: the original str()-ified the whole result list, leaving
        # literal "[" and "]" brackets in the stored value.
        net_name_nodes = html1.xpath('/html/body/div[6]/div/div[2]/div[1]/text()')
        net_name = net_name_nodes[0].strip() if net_name_nodes else ''
        print(net_name)
        # Brand
        brand = html1.xpath('//*[@id="parameter-brand"]/li/a/text()')
        print(brand)
        # Item number
        number = html1.xpath('//*[@id="detail"]/div[2]/div[1]/div[1]/ul[2]/li[2]/@title')
        print(number)
        # First main image (protocol-relative URL).
        # BUGFIX: guard against an empty xpath result before indexing [0].
        src = html1.xpath('//*[@id="spec-img"]/@data-origin')
        if src:
            src[0] = "https:" + src[0]
        print(src)
        # JD price
        jd_price = html1.xpath('//*[@id="fittings"]/div[2]/div[3]/div[2]/strong/text()')
        print(jd_price)

        # Promotions: free gifts plus "spend X save Y" offers, concatenated.
        zengping = html1.xpath('//*[@id="prom-gift"]/div/div/span/em/text()')
        zengping1 = html1.xpath('//*[@id="prom-gift"]/div/div/div/div[1]/a/@title')
        manjian = html1.xpath('//*[@id="prom"]/div/div/em[1]/text()')
        manjian1 = html1.xpath('//*[@id="prom"]/div/div/em[2]/text()')
        promotion = str(zengping) + str(zengping1) + str(manjian) + str(manjian1)

        # Cumulative review count.
        # BUGFIX: guard the [0] index — the node is missing on some pages.
        counts = html1.xpath('//*[@id="comment-count"]/a/text()')
        conment = counts[0] if counts else ''

        item = {
            "store": str(store),
            "real_name": str(real_name),
            "net_name": net_name,
            "brand": str(brand),
            "number": str(number),
            "src": str(src),
            "promotion": promotion,
            "jd_price": str(jd_price),
            "conment": str(conment),
        }
        print(item["store"], item["brand"], item["number"], item["real_name"],
              item["net_name"], item["src"], item["jd_price"],
              item["promotion"], item["conment"])
        # NOTE(review): persistence to MySQL was commented out in the
        # original. If it is revived, use a parameterized query
        # (cursor.execute(sql, params)) instead of f-string interpolation,
        # which is vulnerable to SQL injection.
def run():
    """Entry point: scrape one search-result page for '创维电视'.

    Pagination support was left disabled in the original; only the first
    result page is processed.
    """
    selenium_w()

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    run()















