import random
import requests
from bs4 import BeautifulSoup
from lxml import etree
import re
import json
import time
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
import selenium.webdriver.support.ui as ui
from x import xx
# ---- Module-level scrape configuration and browser setup ----
start = 100  # first line index handed to xx.lines at the bottom of the file
end = 200    # last line index handed to xx.lines
n = 0        # NOTE(review): appears unused in this file — confirm before removing

chrome_options = Options()
# Optional flags kept for reference (enable as needed):
#   --headless       no visible window (required on display-less Linux)
#   --disable-gpu    works around a Chromium bug per Google docs
#   --no-sandbox / --hide-scrollbars / blink-settings=imagesEnabled=false
#
# Spoof a regular browser User-Agent so requests are not blocked for a
# missing/automation UA header.  FIX: the original string contained a stray
# '>' before "AppleWebKit", which produced a malformed User-Agent.
chrome_options.add_argument(
    'User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
    'AppleWebKit/537.36 (KHTML, like Gecko) '
    'Chrome/92.0.4515.131 Safari/537.36 Edg/87.0.664.57'
)
chrome_options.add_argument("window-size=720,620")
# Hide the "controlled by automated software" banner and the automation
# extension to reduce bot-detection signals.
chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
chrome_options.add_experimental_option('useAutomationExtension', False)
driver = webdriver.Chrome(options=chrome_options)

# Evade webdriver detection: make navigator.webdriver report undefined on
# every new document.
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
  "source": """
    Object.defineProperty(navigator, 'webdriver', {
      get: () => undefined
    })
  """
})

# Inject stealth.min.js on every new document for further fingerprint
# evasion.  FIX: read explicitly as UTF-8 instead of the platform default.
with open(r'C:\stealth.min.js', encoding='utf-8') as f:
    js = f.read()

driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
  "source": js
})
wait = WebDriverWait(driver, 1)

# Landing page.  NOTE(review): original comment said "log in to Amazon" but
# the URL is Baidu — confirm which site this scraper actually targets.
logurl = 'https://www.baidu.com'
message = ""

# Extraction rules for fields embedded in the page's inline JSON:
#   pattern  -> the URL of each  "url":"...","iconUrl"  entry
#   pattern1 -> the contents of a  "list":[...]  array
#   pattern2 -> the non-null token in  "raw":"...","normalized"
pattern = re.compile(r'"url":"(https?:[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|])","iconUrl"')
pattern1 = re.compile(r'"list":\[(.*?)\],')
pattern2 = re.compile(r'"raw":"(\b(?!null)\S+)","normalized"')

# A previous revision cleared all cookies and restored a saved session from
# vcyber.json before navigating; that logic is intentionally disabled.
driver.get(logurl)
time.sleep(random.random())  # small random pause to look less bot-like


def tohome(url, wstr):
    """Fetch *url* with the module-level Selenium driver and append one CSV
    row of extracted fields to the file *wstr*.

    Row layout (comma-separated): the quoted URL, up to 5 ``pattern``
    matches padded with empty columns to 5, up to 3 ``pattern1`` list items
    padded to 3, then the first ``pattern2`` match.  If ``pattern`` finds
    nothing but ``pattern2`` does, the URL-match columns are written empty.
    If neither matches, nothing is written for this URL.

    Fixes vs. original: the output file is now closed via a context manager
    (it used to leak a handle per call), ``pattern2.findall`` is run once
    instead of twice, and tab/space indentation is normalized.
    """
    driver.get(url)
    time.sleep(random.random())  # randomized pause between page loads
    page = driver.page_source
    print(page)
    result = pattern.findall(page)    # "url" entries
    result1 = pattern1.findall(page)  # "list":[...] contents
    result2 = pattern2.findall(page)  # "raw" tokens
    t = result2  # original computed the same findall twice; alias it instead
    print(t)

    with open(wstr, mode="a", encoding="utf-8") as f:
        if result:
            print(result)
            f.write('"' + url + '",')
            count = 1
            for line in result:
                print(line)
                count += 1
                f.write(line + ',')
            # pad the URL-match section out to 5 columns
            for num in range(count, 6):
                f.write(',')
                print(num)
            count = 1
            if result1:
                for line in result1[0].split(','):
                    print(line)
                    f.write(line + ',')
                    count += 1
                # pad the list section out to 3 columns
                for num in range(count, 4):
                    f.write(',')
                    print(num)
            else:
                f.write(',,,')
            if result2:
                print(result2[0])
                f.write(result2[0] + '\n')
            else:
                f.write('\n')
        elif t:
            # No "url" matches, but a "raw" token exists: emit empty
            # URL-match columns, then the list section, then the token.
            print(t[0])
            f.write('"' + url + '",,,,,,')
            count = 1
            if result1:
                for line in result1[0].split(','):
                    print(line)
                    f.write(line + ',')
                    count += 1
                for num in range(count, 4):
                    f.write(',')
                    print(num)
            else:
                f.write(',,,')
            f.write(t[0] + '\n')

# Entry point: xx.lines presumably reads URLs from "mgmrt.txt" and calls
# tohome(url, "mgmrtsj1.csv") for lines in [start, end) — TODO confirm
# against the x module; its contract is not visible from this file.
xx.lines(tohome,"mgmrt.txt","mgmrtsj1.csv",start,end)
        