import time
import datetime
from selenium import webdriver
import csv
from selenium.webdriver.common.devtools.v123.network import Cookie
from selenium.webdriver.edge.options import Options
from selenium.webdriver.edge.options import Options
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
from selenium.webdriver.support.wait import WebDriverWait


# Collect all job-detail links from the listing pages

def get_data(url, pages=52):
    """Scrape job-detail URLs from the chinahr.com job listing.

    Opens the listing page in Edge, clicks the category filter, then
    steps through the paginated results, collecting the relative href
    of every job card and converting it to an absolute URL.

    Args:
        url: Listing-page URL to open (e.g. 'https://www.chinahr.com/job').
        pages: Number of result pages to walk (site showed 52 at time of
            writing — TODO confirm this is still current).

    Returns:
        list[str]: Absolute job-detail URLs gathered from all pages.
    """
    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0'

    # Anti-bot-detection setup: spoof the UA and hide automation markers.
    options = Options()
    options.use_chromium = True
    # BUG FIX: the UA string must be passed as a --user-agent flag;
    # passing the bare string was silently ignored by the browser.
    options.add_argument(f"--user-agent={user_agent}")
    # options.add_experimental_option('excludeSwitches', ['enable-automation'])
    options.add_experimental_option('useAutomationExtension', False)
    options.add_argument("--disable-blink-features=AutomationControlled")

    # BUG FIX: the configured options were built but never passed to the
    # driver, so none of the anti-detection settings took effect.
    web0 = webdriver.Edge(service=Service("./msedgedriver.exe"), options=options)

    # BUG FIX: the url parameter was previously overwritten with a
    # hard-coded constant, making the argument dead.
    web0.get(url)
    time.sleep(2)

    # Click the category filter that loads the job listing.
    # NOTE(review): brittle absolute XPath — breaks if the page layout changes.
    web0.find_element(By.XPATH, '//*[@id="app"]/div/div[2]/div[2]/div/div[3]/div/ul/li[3]/div/div/span').click()
    time.sleep(5)

    # One list of relative hrefs per result page.
    hrefs_per_page = []
    for _ in range(pages):
        soup = BeautifulSoup(web0.page_source, 'html.parser')
        divs = soup.find_all('div', attrs={'data-v-75853b77': True})

        # Only the first matching container holds the job cards.
        for div in divs:
            links = div.find_all('a', attrs={'data-v-ce4ca926': True},
                                 class_='detail-card_left')
            hrefs_per_page.append([link.get('href') for link in links])
            break

        # Advance to the next result page.
        web0.find_element(By.CLASS_NAME, 'btn-next').click()

    # Flatten and convert relative hrefs to absolute URLs.
    all_urls = ["https://www.chinahr.com" + href
                for page in hrefs_per_page
                for href in page]
    print(all_urls)
    print(len(all_urls))
    # BUG FIX: previously returned None even though the caller assigns
    # the result (all_data = get_data(...)).
    return all_urls

# def mul_thread():
#     url = 'https://music.163.com/#/discover/playlist/?cat=%E5%8F%A4%E9%A3%8E&order=hot'
#     with concurrent.futures.ThreadPoolExecutor() as executor:
#         for i in range(10, 15):
#             executor.submit(paqu, url, i)


if __name__ == "__main__":
    # Entry point: scrape every job-detail link from the chinahr.com board.
    all_data = get_data("https://www.chinahr.com/job")






# divs = web.find_elements(By.XPATH, '/html/body/div[1]/div/div[2]/div[3]/div/div')
#
# urls = []
# for div in divs:
#     temp = div.find_element(By.XPATH, 'div[2]/div/a')
#     urls.append(temp.get_attribute("href"))
# print(urls)
