# 导入所需的库
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium import webdriver
from bs4 import BeautifulSoup
from urllib import parse
import time
import pymysql
import platform
import mysql.connector
import lxml
from selenium.webdriver.chrome.service import Service

# -i https://pypi.tuna.tsinghua.edu.cn/simple

# Configure Chrome browser options
chrome_options = webdriver.ChromeOptions()
# service = Service(executable_path=r'C:\\Program Files\\Google\\Chrome\\Application\\chromedriver.exe')

# Enable headless mode (run without opening a browser window)
chrome_options.add_argument('--headless')
# Launch Chrome.
# BUG FIX: the original called webdriver.Chrome() without passing
# chrome_options, so the --headless flag above was silently ignored.
browser = webdriver.Chrome(options=chrome_options)
# Shared explicit wait (10 s timeout), used by parse_page
wait = WebDriverWait(browser, 10)

# 获取搜索URL
def get_url(n, word, pinpai):
    """Build the JD search URL for page *n* of keyword *word*, filtered by brand.

    JD maps UI page n to query parameter page = 2*n - 1 (each visible page
    is served as two ajax half-pages).

    Args:
        n: 1-based page number.
        word: search keyword (may be non-ASCII; percent-encoded via urlencode).
        pinpai: brand name for the exbrand filter.

    Returns:
        The full search URL string.
    """
    print('正在爬取第' + str(n) + '页')
    # Keyword goes through urlencode so non-ASCII search terms are valid
    keyword = {'keyword': word}
    # UI page n corresponds to query parameter page = 2n - 1
    page = '&page=%s' % (2 * n - 1)
    # BUG FIX: percent-encode the brand as well — the original interpolated
    # it raw, producing an invalid URL for non-ASCII brand names.
    pinpai = '&ev=exbrand_%s' % parse.quote(pinpai)
    url = 'https://search.jd.com/Search?' + parse.urlencode(keyword) + pinpai + '&enc=utf-8' + page
    print(url)
    return url

# 解析页面并获取商品信息
def parse_page(url, pinpai):
    """Load *url* in the shared browser, scroll to trigger lazy loading,
    and print every product found on the results page.

    Args:
        url: JD search-results URL (as produced by get_url).
        pinpai: brand name, stored alongside each product record.

    Side effects: navigates the module-level ``browser``; prints one tuple
    per product. Persisting to MySQL is currently disabled (the
    save_to_mysql call is commented out).
    """
    print('爬取信息并保存中...')
    browser.get(url)

    # Scroll down in small steps so the page's ajax lazy-loading fires
    for _ in range(100):
        browser.execute_script('window.scrollBy(0,100)')
        time.sleep(0.1)

    # Wait until at least one product item has been rendered
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#J_goodsList .gl-item')))
    soup = BeautifulSoup(browser.page_source, 'lxml')

    # Each <li class="gl-item"> is one product
    for good in soup.find_all('li', class_="gl-item"):
        num = good['data-sku']  # product SKU
        # (the original also read p-price strong.em into an unused `tag`
        # variable — removed)
        money = good.find('div', class_="p-price").strong.i.string  # price

        # Some listings carry no shop name; find() then yields None here
        store = good.find('div', class_="p-shop").span
        commit = good.find('div', class_="p-commit").strong.a.string  # review count
        name = good.find('div', class_="p-name p-name-type-2").a.em  # name node
        image = good.find('div', class_="p-img").a.img.get('src')  # image URL
        detail_addr = good.find('div', class_="p-img").find('a')['href']  # detail link

        # Fall back to a placeholder when the listing has no shop name
        if store is not None:
            new_store = store.a.string
        else:
            new_store = '没有找到店铺 - -！'

        # The <em> mixes plain text with highlight spans; join all fragments.
        # str.join replaces the original quadratic += concatenation loop.
        new_name = ''.join(name.strings)

        product = (num, pinpai, new_name, money, new_store, commit, image, detail_addr)
        # save_to_mysql(product)
        print(product)

# 获取操作系统类型
def get_operating_system():
    """Return "Windows", "Linux", or "Other" according to platform.system()."""
    os_name = platform.system()
    return os_name if os_name in ("Windows", "Linux") else "Other"

# 连接数据库
def connect_db():
    """Open and return a MySQL connection to the ``jd`` database.

    The host is chosen by operating system: localhost on Windows, a fixed
    LAN address otherwise.

    NOTE(review): '10.8.688.53' is not a valid IPv4 address (octet 688 > 255)
    — confirm the intended Linux database host.
    NOTE(review): credentials are hardcoded; consider loading them from the
    environment instead.
    """
    if get_operating_system() == "Windows":
        host = '127.0.0.1'  # local database on Windows
    else:
        host = '10.8.688.53'  # database IP for the Linux deployment
    return mysql.connector.connect(
        host=host,
        user="root",        # database user
        password="123456",  # database password
        database="jd",      # database name
    )

# 将商品信息保存到MySQL数据库
def save_to_mysql(result):
    """Insert one product record into the ``information`` table.

    Args:
        result: 8-tuple (num, brand, name, money, store, commit, image, detail).

    Commits on success; rolls back and reports on failure. The connection is
    always closed, even if an unexpected error propagates.
    """
    db = connect_db()
    cursor = db.cursor()  # cursor for executing statements

    # Parameterized INSERT.
    # BUG FIX: the original interpolated values with %-string formatting,
    # which breaks on quotes/apostrophes in product names and is open to
    # SQL injection; the driver now handles escaping.
    sql = """INSERT INTO information(info_num, info_brand, info_name, info_money,
             info_store, info_commit, info_image, info_detail)
             VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""

    try:
        cursor.execute(sql, result)
        db.commit()  # persist the row
        print('保存成功！')
    except Exception as exc:  # narrowed from a bare except; still best-effort
        db.rollback()  # undo the failed insert
        print('保存失败！', exc)
    finally:
        db.close()  # BUG FIX: close the connection on every path

# 主函数，控制程序的执行
def main():
    """Crawl JD search results for a hardcoded keyword/brand/page count."""
    # Inputs are currently hardcoded (no interactive prompt, despite the
    # original "user input" comment)
    word = '手机'
    pinpai = 'Apple'
    pages = 10

    # Validate the page count before crawling
    if not 1 <= pages <= 100:
        # BUG FIX: the original recursed into main() here, but with
        # hardcoded inputs that meant infinite recursion; report and stop.
        print('请重新输入！')
        return None

    try:
        for n in range(1, pages + 1):
            url = get_url(n, word, pinpai)  # build the search URL for page n
            parse_page(url, pinpai)         # scrape and print that page
        print('爬取完毕！')
        browser.close()  # close the browser window when done
    except Exception as error:
        print('出现异常！', error)
        return None

# Program entry point
if __name__ == '__main__':
    main()
