# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
import requests
from time import sleep
from lxml import etree
from config import url,proxy,is_window,sleep_time
from file import set_url_list,get_url_list

# Build and return a configured Chrome browser instance
def get_browser():
    """Create a Chrome WebDriver tuned to avoid basic automation detection.

    Reads `proxy` and `is_window` from config: routes traffic through a
    proxy address fetched from the proxy service URL, and runs headless
    unless a visible window is requested.
    """
    opts = ChromeOptions()
    # Hide the "controlled by automated software" infobar and silence
    # driver logging so the session looks less like a bot.
    opts.add_experimental_option('excludeSwitches', ['enable-automation', 'enable-logging'])
    opts.add_experimental_option('useAutomationExtension', False)
    # Ask the proxy service for an address and route all traffic through it.
    proxy_address = requests.get(proxy).text
    opts.add_argument("--proxy-server=" + proxy_address)
    # Headless mode when no browser window should be shown.
    if not is_window:
        opts.add_argument('--headless')
        opts.add_argument('--disable-gpu')
    driver = webdriver.Chrome(options=opts)
    # Inject a script before any page code runs so navigator.webdriver
    # reads as undefined (defeats the most common automation check).
    driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument', {
        'source': 'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'
    })
    return driver

# Shared browser instance used by get_pagination() below.
web = get_browser()

# Accumulator for the link URLs scraped across pages; persisted at the end
# of the script via set_url_list().
goods_list = []
def get_pagination(pageNum, total=10):
  """Scrape link URLs from pages `pageNum`..`total`, accumulating them
  into the module-level `goods_list`, then quit the shared browser.

  Args:
      pageNum: 1-based index of the page currently being scraped.
      total: last page number to scrape (inclusive).
  """
  # BUG FIX: only load the start URL on the first call. Previously this ran
  # on every recursive call, which navigated back to page 1 immediately
  # after clicking "next", so every iteration re-scraped the first page.
  if pageNum == 1:
    web.get(url)
  tab_url = web.find_elements_by_xpath('//*[@id="search-bar"]/div[2]/div/div/div/ul/li/div[1]/a')
  arr = [item.get_attribute('href') for item in tab_url]
  # BUG FIX: the collected hrefs were previously discarded when this local
  # list went out of scope; extend the module-level accumulator so
  # set_url_list(goods_list) actually receives the scraped URLs.
  goods_list.extend(arr)
  print("获取第" + str(pageNum) + "页地址成功")
  # Throttle between pages using the configured delay (was hard-coded 2;
  # `sleep_time` is imported from config and was never used).
  sleep(sleep_time)
  pageNum += 1
  if pageNum <= total:
    next_page = web.find_elements_by_xpath('//*[@class="next"]')
    # Guard against a missing "next" button (last page reached early);
    # previously next_page[0] raised IndexError and leaked the browser.
    if next_page:
      next_page[0].click()
      get_pagination(pageNum, total)
    else:
      web.quit()
  else:
    web.quit()
  


get_pagination(1)


# Persist the scraped URLs via the file helper, then read them back and
# print each stored entry's length as a quick sanity check.
set_url_list(goods_list)
arr = get_url_list()

for item in arr:
  print(len(item))