from selenium import webdriver
import time
import json
from bs4 import BeautifulSoup

# Collect the raw HTML of the first 9 search-result pages.
everyPages = []  # one page_source string per results page

# Read the cookie file once — it does not change between pages
# (the original re-read it on every iteration).
with open(r"../boss直聘Edge.json", "r", encoding="utf-8") as fp:
    cookies = json.loads(fp.read())

for i in range(1, 10):
    print(i)
    boss = webdriver.Edge()
    try:
        url = f'https://www.zhipin.com/web/geek/job?city=101250100&page={i}'
        # Visit the domain first: add_cookie() only accepts cookies
        # for the domain of the page currently loaded.
        boss.get(url)
        for cookie in cookies:
            boss.add_cookie(cookie)
        # Reload now that we are authenticated, then wait for the
        # JavaScript-rendered job list to appear.
        boss.get(url)
        time.sleep(4)
        everyPages.append(boss.page_source)
    finally:
        # Always close the browser — the original leaked one Edge
        # window per page (9 drivers were created and never quit).
        boss.quit()

print("下一步")


# 根据页面源代码，获取每个页面的岗位链接
# Parse every saved results page and collect the absolute URL of
# each job-detail page.
详情列表 = []
for page_html in everyPages:
    soup = BeautifulSoup(page_html, 'html.parser')
    # Each job card's anchor carries class "job-card-left" and holds
    # the relative link to the job's detail page.
    for element in soup.find_all(class_='job-card-left'):
        # .get() instead of ['href']: a matched element without an
        # href attribute would otherwise raise KeyError and abort
        # the whole scrape.
        href = element.get('href')
        if not href:
            continue
        详情列表.append('https://www.zhipin.com' + href)

# Fetch the full HTML of every job-detail page found above.
源代码 = []

# Read the cookie file once instead of once per link.
with open(r"../boss直聘Edge.json", "r", encoding="utf-8") as fp:
    cookies = json.loads(fp.read())

# enumerate() replaces the hand-rolled `count` counter; the printed
# index is 0-based, matching the original output exactly.
for count, link in enumerate(详情列表):
    boss = webdriver.Edge()
    try:
        # Visit the domain first so add_cookie() is allowed to set
        # cookies for it, then reload as an authenticated session.
        boss.get(link)
        for cookie in cookies:
            boss.add_cookie(cookie)
        boss.get(link)
        # Give the JavaScript-rendered detail page time to load.
        time.sleep(5)
        print("第"+str(count)+"个")
        源代码.append(boss.page_source)
    finally:
        # Always close the browser — the original created one Edge
        # window per link and never quit any of them.
        boss.quit()

#保存到本地
# Write each detail page's HTML to its own numbered file.
for index, code in enumerate(源代码):
    filename = f'source_code_page_{index + 1}.html'
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(code)
    # Bug fix: the original message contained the literal text
    # "(unknown)" instead of interpolating the actual filename, and
    # print(code) dumped the entire HTML document to stdout before
    # every write — both removed.
    print(f'源代码保存为 {filename} 成功！')