from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from pyquery import PyQuery as pq
import json
import re
import time
import os
import csv
import pymysql
import sys

# Database configuration
host = '127.0.0.1' # MySQL server address (local machine)
user = 'root' # MySQL user name
password = '123456' # MySQL password  # NOTE(review): credentials hard-coded in source — consider env vars
database = 'crawler' # schema (database) name the rows are written into
charset = 'utf8' # connection character set (MySQL "utf8" = 3-byte UTF-8)

# Create the database connection at import time; cursor returns rows as dicts.
conn = pymysql.connect(host=host, user=user, password=password
                           , database=database, charset=charset)
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)

# Crawler setup: launch Chrome at import time and share one explicit wait.
browser=webdriver.Chrome()
browser.maximize_window()   # maximize the window so page elements are laid out consistently
wait=WebDriverWait(browser, 10)

# 搜索
def search(keyboard):
    """Open the JD.com home page and submit a search for *keyboard*.

    Leaves the browser on the first page of search results; parsing is
    done separately by jiexi_page / next_page.
    """
    browser.get("https://www.jd.com/")
    # Wait until the search box exists and the search button is clickable.
    search_box = wait.until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "#key"))
    )
    search_button = wait.until(
        EC.element_to_be_clickable((By.CSS_SELECTOR, "#search > div > div.form > button > i"))
    )
    search_box.send_keys(keyboard)
    search_button.click()

def next_page(page_number, keyboard):
    """Jump to results page *page_number* via the bottom page-skip box, then parse it.

    Types the page number into the "go to page" input, clicks confirm, and
    hands the freshly loaded page to jiexi_page(keyboard).
    """
    # Locate the page-number input and its confirm button at the page bottom.
    page_input = wait.until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "#J_bottomPage > span.p-skip > input"))
    )
    confirm = wait.until(
        EC.element_to_be_clickable((By.CSS_SELECTOR, "#J_bottomPage > span.p-skip > a"))
    )
    page_input.clear()
    page_input.send_keys(page_number)
    confirm.click()
    jiexi_page(keyboard)

def _parse_comment_count(raw):
    """Normalize a JD comment-count fragment to a count.

    *raw* is the ``.p-commit strong`` text with the trailing "条评价"
    (3 chars) already sliced off by the caller, e.g. "5000+" or "2万+".
    Counts given in 万 (ten-thousands) become a float ("2万+" -> 20000.0);
    plain counts stay a bare digit string ("5000+" -> "5000"), matching the
    original downstream formatting.
    """
    count = raw.strip("+")
    # BUG FIX: the original used str.strip("万+条评价") / strip("+条评价"),
    # which strips any of those *characters* from both ends rather than
    # removing a suffix, and the "+条评价" branch wrongly multiplied a plain
    # count by 10000. Handle the suffix explicitly instead.
    if count.endswith("条评价"):
        count = count[:-len("条评价")].strip("+")
    if count.endswith("万"):
        return float(count[:-1]) * 10000  # 万 = units of 10,000
    return count


def jiexi_page(keyboard):
    """Parse the currently loaded search-results page and persist each product.

    Waits for the ``.gl-item`` result cards to be present, scrapes image URL,
    product URL, name, price, comment count and shop name from each card, and
    inserts one row per product into ``crawler_crawler`` (committed per row so
    a mid-page failure keeps earlier rows).
    """
    wait.until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "#J_searchWrap .gl-item"))    # page loaded?
    )
    sql = "insert into crawler_crawler (image,address,name,price,commit,shop,keyboard) values (%s,%s,%s,%s,%s,%s,%s)"
    html = browser.page_source
    doc = pq(html)
    items = doc("#J_searchWrap .gl-item").items()   # iterate product cards
    for item in items:
        address = item.find(".p-img").find("a").attr("href")
        img = item.find(".p-img").find('img').attr('src')
        # JD serves protocol-relative URLs ("//item.jd.com/..."); add the scheme.
        if address:
            address = "https:" + address
        if img:
            img = "https:" + img
        # text()[:-3] drops the trailing "条评价"; text()[1:] drops the currency sign.
        product = {
            'img': img,
            'address': address,
            'name': item.find(".p-name.p-name-type-2").text().replace("\n", ""),
            'price': item.find(".p-price").text()[1:].replace("\n", ""),
            '评价': _parse_comment_count(item.find(".p-commit").find("strong").text()[:-3]),
            'shop': item.find(".p-shop").text(),
            'keyboard': keyboard
        }
        product_list = list(product.values())
        print(product_list)
        cursor.execute(sql, tuple(product_list))
        conn.commit()

def main(keyboard, pages):
    """Crawl JD search results for *keyboard* over pages 1..pages-1.

    ``pages`` is the exclusive upper page bound; any falsy value (None, "")
    falls back to 101, i.e. JD's 100-page maximum. The DB cursor/connection
    and the browser are always released, even when a page fails to load.
    """
    try:
        search(keyboard)
        time.sleep(1)
        next_page(1, keyboard)
        if not pages:
            pages = 101  # JD caps search results at 100 pages
        for page in range(2, int(pages)):
            next_page(page, keyboard)
            # Give the page time to refresh before the next jump; without the
            # pause the stale page is parsed / the element is not found.
            time.sleep(2)
    finally:
        # BUG FIX: the original left the cursor/connection open on any
        # exception (e.g. a page-load timeout) and never quit the browser.
        cursor.close()
        conn.close()
        browser.quit()

if __name__ == "__main__":
    # Usage: python crawler.py <keyword> [pages]
    # BUG FIX: the original indexed sys.argv[1]/sys.argv[2] unconditionally
    # and crashed with IndexError when arguments were omitted.
    if len(sys.argv) < 2:
        sys.exit("usage: python crawler.py <keyword> [pages]")
    keyboard = sys.argv[1]
    # pages is optional; main() falls back to 101 (JD's 100-page cap) when falsy.
    pages = sys.argv[2] if len(sys.argv) > 2 else None
    main(keyboard, pages)