import logging
import re
import time

import requests
from selenium import webdriver
from selenium.webdriver import Keys
from selenium.webdriver.common.by import By

# --- Browser setup: configure Edge to reduce automation fingerprints ---
option = webdriver.EdgeOptions()
# Keep the browser window open after the script finishes.
option.add_experimental_option("detach", True)
# Remove the "being controlled by automated software" infobar.
option.add_experimental_option('excludeSwitches', ['enable-automation'])
# Strip the Blink-side automation feature flag.
option.add_argument("--disable-blink-features=AutomationControlled")

# BUG FIX: the original list was missing a comma between the two literals, so
# Python silently concatenated them into ONE invalid user-agent string and the
# list had a single garbage element.
UAlist = [
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
]
option.add_argument("user-agent=" + UAlist[0])
browser = webdriver.Edge(options=option)

# Hide navigator.webdriver before any page script runs.
# BUG FIX: the original JS used 'object.defineProperty' (lowercase 'o') and a
# malformed descriptor '{undefinedget: ...}', and mis-capitalised the CDP
# command name ("page.addscriptToEvaluateonNewDocument"), so the override
# never took effect.
script = '''Object.defineProperty(navigator, 'webdriver', {get: () => undefined})'''
# execute_cdp_cmd runs a raw Chrome DevTools Protocol command.
browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {"source": script})

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s : %(message)s')


def getCodeBase64():
    """Fetch the captured captcha speed-report endpoint.

    Returns:
        requests.Response: the raw HTTP response (not parsed here).
    """
    getcodeimgurl = 'https://aegis.qq.com/speed?id=3046&uin=1145769693&version=1.43.6&aid=6d0e088c-e176-426f-ae7c-bd7542aa06a8&env=production&platform=4&netType=4&vp=1432%20*%20818&sr=1920%20*%201080&sessionId=session-1706256542180&from=https%3A%2F%2Furlsec.qq.com%2Fcheck.html%3Furl%3Dsdfsd.com&referer='
    # Timeout added so a stalled connection cannot hang the script forever.
    response = requests.get(getcodeimgurl, timeout=10)
    return response


def verifyImg():
    """POST to the captcha verify endpoint and return the raw response.

    The triple-quoted string below is the browser request captured from
    devtools, kept verbatim for reference; the real form payload (`datas`)
    still has to be reverse-engineered before this call can succeed.

    Returns:
        requests.Response: the verify endpoint's HTTP response.
    """
    verifyurl = 'https://t.captcha.qq.com/cap_union_new_verify'
    '''
    POST /cap_union_new_verify HTTP/1.1
    Accept: application/json, text/javascript, */*; q=0.01
    Accept-Encoding: gzip, deflate, br
    Accept-Language: zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7
    Cache-Control: no-cache
    Connection: keep-alive
    Content-Length: 4853
    Content-Type: application/x-www-form-urlencoded; charset=UTF-8
    Cookie: pac_uid=0_b9cc3bcb8c8cb; iip=0; RK=i7c1Ssu5EW; ptcz=4c148ec65388a70f82be785efa2c688f0d328c5596a3ee2389d38e12e6092a7b; uin=o1145769693; TDC_itoken=1104903242%3A1706181314; _clck=op6thk|1|fiq|0; skey=@4R9uR4fP8
    DNT: 1
    Host: t.captcha.qq.com
    Origin: https://t.captcha.qq.com
    Pragma: no-cache
    Referer: https://t.captcha.qq.com/cap_union_new_show?aid=2046626881&protocol=https&accver=1&showtype=popup&ua=TW96aWxsYS81LjAgKE1hY2ludG9zaDsgSW50ZWwgTWFjIE9TIFggMTBfMTVfNykgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzEyMC4wLjAuMCBTYWZhcmkvNTM3LjM2IEVkZy8xMjAuMC4wLjA%3D&noheader=1&fb=1&aged=0&enableAged=0&enableDarkMode=0&grayscale=1&clientype=2&sess=s0ulZMbsp86iMylC_Bwufm-apDQ7i-fxU_HEDLnScJnSjA15Z0TByqHdbQ3VqcKCazB-Iprp4WCkD_9pUZqYasdLkS06e8WAQV_eMCjYDYcFUbub4_UPT2JAKaOss5Abs11D6vOl9y_hVqA2XiKzFz2Z4D2V27rlc-xkWfofiY9Zf2B2IX3IRpj8p-TQ80fwrFVeHTgEzj-uNw993gVMiUToDYssIEnkoX4qExRFz7X6nQRBgSJEqx9Rg5f5Y_n_V5-x2OSJLB5UE*&fwidth=0&sid=7156559164350590976&wxLang=&tcScale=1&uid=&cap_cd=&rnd=972383&prehandleLoadTime=123&createIframeStart=1706256667169&global=0&subsid=4
    Sec-Fetch-Dest: empty
    Sec-Fetch-Mode: cors
    Sec-Fetch-Site: same-origin
    User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0
    X-Requested-With: XMLHttpRequest
    sec-ch-ua: "Not_A Brand";v="8", "Chromium";v="120", "Microsoft Edge";v="120"
    sec-ch-ua-mobile: ?0
    sec-ch-ua-platform: "macOS"
    '''
    # TODO: populate with the actual captcha form fields before this works.
    datas = {}
    # Timeout added so a stalled connection cannot hang the script forever.
    response = requests.post(verifyurl, data=datas, timeout=10)
    return response


# --- Drive the browser: submit a URL to the safety-check page, read result ---
try:
    browser.get('https://urlsec.qq.com/check.html')
    searchInput = browser.find_element(By.XPATH, '//*[@id="check-input"]')
    time.sleep(1)
    searchInput.send_keys('bluedot.com')
    searchInput.send_keys(Keys.ENTER)
except Exception as e:
    # Best-effort: log the failure and fall through to the result lookup.
    print('lll')
    print(e)
    # browser.close()

# BUG FIX: the original slept 10000 seconds (~2.8 hours) before reading the
# result; a short wait is enough for the page to render.
# TODO: replace with an explicit WebDriverWait on the result element.
time.sleep(10)

resultdiv = browser.find_element(By.CLASS_NAME, 'check-result')
# BUG FIX: the original computed `.text` and threw it away, then printed the
# WebElement object's repr.  Capture and print the actual text instead.
# NOTE(review): "display:black" looks like a typo for "display:block", but the
# attribute selector matches the style string literally — confirm against the
# live page before changing it.
result_text = resultdiv.find_element(By.CSS_SELECTOR, "p[style='display:black']").text

print(result_text)
exit()


def scrape_page(url):
    """GET *url* and return its body text, or None on any failure.

    Args:
        url: absolute URL to fetch.

    Returns:
        The response body (str) on HTTP 200; None on a non-200 status or
        any requests-level exception (both are logged).
    """
    logging.info('scraping %s...', url)
    try:
        # Timeout added so a stalled connection cannot hang the scraper.
        response = requests.get(url, timeout=10)
        if response.status_code == 200:
            return response.text
        # BUG FIX: '$s' is not a valid %-style placeholder — the status code
        # was never interpolated into the log line.  Use '%s'.
        logging.error('get url invalid code %s while scraping %s', response.status_code, url)
    except requests.RequestException:
        logging.error('error exceptions while scraping %s', url, exc_info=True)


def scrape_index(page):
    """Fetch one listing page by number and return its HTML (or None)."""
    # NOTE(review): BASE_URL is not defined anywhere in this file — confirm it
    # is supplied elsewhere before running.
    return scrape_page('{}/page/{}'.format(BASE_URL, page))


def scrape_detail(detail_id):
    """Download a detail page; *detail_id* is already a full URL here.

    (The commented-out original line shows it was once built from BASE_URL.)
    """
    # detail_url = f'{BASE_URL}/detail/{detail_id}'
    detail_url = detail_id
    return scrape_page(detail_url)


# Parse the index (listing) page HTML.
def parse_index(html):
    """Yield the detail-page URL for every movie link found in *html*.

    Args:
        html: raw HTML of one listing page.

    Yields:
        str: one detail URL per ``<a href="..." ... class="name">`` link.
    """
    # pattern = re.compile('<a.*?href="(.*?)".*?class="name">', re.S)
    pattern = re.compile('<a.*?href="(.*?)".*?class="name">')
    items = re.findall(pattern, html)
    # BUG FIX: the original did `return []` inside a generator — the value is
    # silently attached to StopIteration and never seen; a bare return ends
    # iteration identically without the misleading literal.
    if not items:
        return
    for item in items:
        # NOTE(review): urljoin(BASE_URL, item) (commented out in the
        # original) would be safer if `item` can be absolute; plain
        # concatenation kept to preserve behavior.  Dropped the pointless
        # "+ ''" from the original expression.
        detail_url = BASE_URL + item
        logging.info('get detail url %s', detail_url)
        yield detail_url


def parse_detail(html):
    """Extract movie fields from a detail page's HTML via regexes.

    Args:
        html: raw HTML of one detail page.

    Returns:
        dict with keys 'cover', 'name', 'categories', 'published_at',
        'drama', 'score'.  Each value is None when its pattern does not
        match; 'score' is a float, the others are stripped strings.
    """
    print('begin parse detail...')
    cover_pattern = re.compile('<img.*?src="(.*?)".*?class="cover">', re.S)
    title_pattern = re.compile('<h2.*?>(.*?)</h2>')
    categories_pattern = re.compile('<button.*?category.*?<span>(.*?)</span>.*?</button>', re.S)
    # FIX: raw string so '\d' is not an invalid str escape (DeprecationWarning).
    published_pattern = re.compile(r'(\d{4}-\d{2}-\d{2}).*?上映')
    # address_pattern=re.compile('')
    info_pattern = re.compile('<div.*?drama.*?>.*?<p.*?>(.*?)</p>', re.S)
    score_pattern = re.compile('<p.*?score.*?>(.*?)</p>', re.S)

    def _first(pattern, convert=None):
        # Search once per pattern; the original ran every search twice
        # (once for the truthiness guard, once for the group extraction).
        match = pattern.search(html)
        if match is None:
            return None
        value = match.group(1).strip()
        return convert(value) if convert else value

    return {
        'cover': _first(cover_pattern),
        'name': _first(title_pattern),
        'categories': _first(categories_pattern),
        'published_at': _first(published_pattern),
        'drama': _first(info_pattern),
        'score': _first(score_pattern, float),
    }


def main():
    """Entry point: scrape the first listing page.

    BUG FIX: the original referenced an undefined global ``page`` and raised
    NameError as soon as it ran; default explicitly to page 1.
    """
    page = 1  # TODO: iterate the full page range once the total is known
    index_html = scrape_index(page)
    logging.info('fetched index page %s (%s chars)',
                 page, len(index_html) if index_html else 0)


if __name__ == '__main__':
    main()
