import re
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery as pq
from config import *
import pymongo
import time

# MongoDB connection and database handle (MONGO_URL / MONGO_DB come from config.py).
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]

chrome_options = webdriver.ChromeOptions()
# Run Chrome in headless mode (no visible browser window).
chrome_options.add_argument('--headless')
# --disable-gpu works around headless rendering issues on some platforms.
chrome_options.add_argument('--disable-gpu')


# `chrome_options=` has been deprecated since Selenium 3.8 and was removed in
# Selenium 4; `options=` accepts the same ChromeOptions object in both versions.
browser = webdriver.Chrome(options=chrome_options)
# Shared explicit wait (10 s timeout) used by every page interaction below.
wait = WebDriverWait(browser, 10)


def search():
    """Open jd.com, search for KEYWORD, scrape page 1 of the results and
    return the text of the total-page indicator (e.g. '100').

    Retries itself recursively on TimeoutException; note the recursion is
    unbounded if the site stays unreachable.
    """
    try:
        # Open the home page.
        browser.get("https://www.jd.com/")
        # Locate the search box (renamed from `input`, which shadowed the builtin).
        search_box = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "#key"))
        )
        # Locate the search button.
        submit = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, "#search > div > div.form > button > i")))
        # Type the keyword and trigger the search.
        search_box.send_keys(KEYWORD)
        submit.click()
        # Element at the bottom of the result list holding the total page count.
        total = wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, "#J_bottomPage > span.p-skip > em:nth-child(1)")))
        # Scrape the first result page before returning.
        get_products()
        return total.text
    except TimeoutException:
        # Page load / element lookup timed out: retry the whole search.
        return search()


def next_page(page_number):
    """Jump to result page `page_number` via the page-jump box at the bottom
    of the list, wait until navigation completes, then scrape that page.

    Retries itself recursively on StaleElementReferenceException (the page
    re-rendered mid-interaction); the recursion is unbounded.
    """
    print('正在翻页', page_number)
    try:
        # Page-number input box (renamed from `input`, which shadowed the builtin).
        page_input = wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, '#J_bottomPage > span.p-skip > input')))
        # "Go" button next to the input box.
        submit = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, '#J_bottomPage > span.p-skip > a')))
        page_input.clear()
        page_input.send_keys(page_number)
        submit.click()
        # Wait until the highlighted page number equals the target page.
        # (Mere presence of `a.curr` is not enough: it already exists on the
        # page we are leaving, so presence alone never proves navigation.)
        wait.until(EC.text_to_be_present_in_element(
            (By.CSS_SELECTOR, "#J_bottomPage > span.p-num > a.curr"),
            str(page_number)))
        get_products()
        print('第', page_number, "翻页完成")

    except StaleElementReferenceException:
        # The DOM node went stale (page re-rendered); redo the whole jump.
        next_page(page_number)


def get_products():
    """Parse the currently loaded result page and persist every product."""
    print('获取商品信息：-- start ---')
    # Ensure at least one product item has rendered before reading the DOM.
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.gl-item')))
    # Parse the live page source. Force the HTML parser: pyquery's default
    # XML parsing attaches the xhtml namespace (xmlns="http://www.w3.org/1999/xhtml"),
    # which prevents plain tag selectors from matching.
    doc = pq(browser.page_source, parser="html")
    for entry in doc(".gl-item").items():
        img = entry.find('.p-img a img')
        record = {
            'image-src': img.attr('src'),
            'image-data-lazy-img': img.attr('data-lazy-img'),
            'price': entry.find('.p-price').text(),
            'title': entry.find('.p-name').text(),
            'shop': entry.find('.p-shop').text(),
        }
        print(record)
        save_mongodb(record)


def save_mongodb(result):
    """Insert one product record into the configured MongoDB collection.

    Failures are logged and swallowed deliberately, so one bad record does
    not abort the whole scrape.
    """
    try:
        # Collection.insert() was deprecated and removed in PyMongo 4;
        # insert_one() is the supported single-document API.
        db[MONGO_TABLE].insert_one(result)
        print('存储到MONGODB成功', result)
    except Exception:
        print('存储到MONGODB失败', result)


def main():
    """Run the full scrape: search page 1, then walk pages 2..total."""
    try:
        total_text = search()
        # Extract the numeric page count from the indicator text (e.g. '100').
        total = int(re.search(r'(\d+)', total_text).group(1))
        for page in range(2, total + 1):
            next_page(page)
            # Throttle page turns so we do not hammer the site.
            time.sleep(10)

    except Exception as exc:
        # Report the actual error instead of discarding it behind a fixed message.
        print('出错了', exc)
    finally:
        # quit() shuts down the whole WebDriver session; close() would only
        # close the current window and leave the chromedriver process running.
        browser.quit()


if __name__ == '__main__':
    main()
