# -*- coding: utf-8 -*-

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from lxml import etree
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
from bs4 import BeautifulSoup as bs
import re

# Driver initialization
def init_driver(url, driver_path='D:\\webdriver\\chromedriver.exe'):
    """Start a maximized Chrome WebDriver and open *url*.

    :param url: the page to load once the browser is up
    :param driver_path: path to the chromedriver executable; used because it
        is not on PATH. Callers with chromedriver on PATH can ignore it.
    :return: the ready WebDriver instance
    """
    chrome_options = Options()
    # chrome_options.add_argument('--headless')  # headless mode (disabled)
    chrome_options.add_argument('--disable-gpu')
    # Hide the "controlled by automated software" infobar to reduce the
    # chance of being detected as a bot.
    chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
    # NOTE(review): `executable_path` is removed in Selenium >= 4.10 — migrate
    # to webdriver.chrome.service.Service when upgrading. `options=` replaces
    # the deprecated `chrome_options=` keyword.
    driver = webdriver.Chrome(executable_path=driver_path, options=chrome_options)
    driver.maximize_window()  # maximize the browser window
    driver.get(url)
    return driver


# Search and scrape result titles page by page
def get_url(driver):
    """Search Baidu for "美的集团" and print the result titles of every page.

    Waits for the search box, submits the query, then loops over the result
    pages: on each page it extracts result headings via several xpath layouts
    (Baidu serves different result-container markup), prints them, and clicks
    the "下一页>" (next page) link until no such link remains.

    Bug fixes vs. the original: the parameter was named ``drive`` while the
    body used the global ``driver`` (the argument was ignored); dead locals
    (BeautifulSoup/regex scrapes that were never read) are removed; the
    deprecated ``find_element_by_*`` helpers are replaced with the
    ``find_element(By, ...)`` form already used elsewhere in this file; the
    trailing click on ``//li[@class="next"]/a`` is dropped because it only
    ran after the last page and always raised NoSuchElementException.

    :param driver: an initialized WebDriver already positioned on baidu.com
    """
    # Wait up to 10s for the search box (id="kw"), type the query, then
    # click the search button (id="su").
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, "kw"))).send_keys("美的集团")
    driver.find_element(By.ID, 'su').click()

    while True:
        selector = etree.HTML(driver.page_source)

        # Result titles appear under several container layouts; try each.
        # (Some selectors overlap, so a title may print more than once —
        # preserved from the original behavior.)
        for node in selector.xpath('//div[@class="content_left"]//h2'):
            print("".join(node.xpath('./h3/a//text()')))
        for node in selector.xpath('//div[@class="result c-container "]'):
            print("".join(node.xpath('./h3/a//text()')))
        for node in selector.xpath('//h2'):
            print("".join(node.xpath('./a//text()')))
        for node in selector.xpath('//h3'):
            print("".join(node.xpath('./a//text()')))

        # Click "next page" if the pager offers one; otherwise we are done.
        last_page = True
        for a in driver.find_elements(By.XPATH, '//div[@id="page"]//a'):
            if a.text == "下一页>":
                a.click()
                last_page = False
                break
        time.sleep(3)  # let the next results page render before re-parsing
        if last_page:
            break





def close_driver(driver):
    """Shut down the browser session and release all WebDriver resources."""
    driver.quit()

if __name__ == '__main__':
    url = "https://www.baidu.com/"
    driver = init_driver(url)
    try:
        get_url(driver)
    finally:
        # Always shut the browser down, even if scraping raises — the
        # original never called close_driver, leaking the Chrome process.
        close_driver(driver)

