import time
from urllib.parse import urljoin

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

base_url = "https://www.baidu.com" # Root URL of the site to crawl; replace with the page whose download links you want to extract.

# 定义一个函数，用于获取页面内容并解析HTML
def get_target_links(url):
    """Open *url* in headless Chrome, scroll to the bottom until the page
    stops growing (to trigger dynamic/lazy loading), and return the href of
    every anchor whose href contains the target keyword.

    Parameters
    ----------
    url : str
        Absolute URL of the page to scrape.

    Returns
    -------
    list[str]
        Matching ``href`` attribute values.
    """
    chrome_options = Options()
    chrome_options.add_argument("--headless")  # run Chrome without a visible window
    # Path to the ChromeDriver binary; adjust for the local environment.
    chrome_driver_path = "/usr/local/bin/chromedriver"
    # Selenium 4 removed the positional executable-path argument to
    # webdriver.Chrome(); the driver path must be wrapped in a Service.
    driver = webdriver.Chrome(service=Service(chrome_driver_path), options=chrome_options)
    try:
        driver.get(url)

        # Scroll until document.body.scrollHeight stops changing, so all
        # dynamically loaded content (e.g. Vue infinite scroll) is rendered.
        last_height = driver.execute_script("return document.body.scrollHeight")
        while True:
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)  # give newly revealed content time to load
            new_height = driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                break
            last_height = new_height

        soup = BeautifulSoup(driver.page_source, 'html.parser')
        # href=True restricts to anchors that actually carry an href,
        # so .get("href") below can never be None.
        anchors = soup.find_all("a", href=True)
        return [a.get("href") for a in anchors if "2021黑马Python6.5就业班" in a.get("href")]
    finally:
        # Always release the headless browser process, even if scraping raised.
        driver.quit()

# 定义一个函数，用于获取指定页面中所有链接的列表
def get_links(url):
    """Resolve *url* against the site root and scrape that page for target links.

    Parameters
    ----------
    url : str
        Absolute URL, or a path relative to ``base_url``.

    Returns
    -------
    list[str]
        Links found on the resolved page.
    """
    print(f"url:{url}")
    # urljoin leaves already-absolute URLs untouched.
    resolved = urljoin(base_url, url)
    return get_target_links(resolved)

# 定义一个函数，用于递归地获取所有链接的列表
def get_all_links(url, depth, _visited=None):
    """Recursively collect target links reachable from *url*.

    Parameters
    ----------
    url : str
        Starting page (absolute, or relative to ``base_url``).
    depth : int
        Maximum recursion depth; ``0`` returns an empty list.
    _visited : set[str] | None
        Internal accumulator of URLs already fetched; callers normally omit
        it. Without it, pages that link to each other were re-crawled on
        every branch, blowing up exponentially within the depth bound.

    Returns
    -------
    list[str]
        All links found, in discovery order (a link may appear more than
        once in the list, but each page is fetched at most once).
    """
    if _visited is None:
        _visited = set()
    # Stop at the depth bound or if this page was already crawled.
    if depth == 0 or url in _visited:
        return []
    _visited.add(url)
    links = []
    for link in get_links(url):
        links.append(link)
        # _visited is shared down the recursion, so each page is visited once.
        links += get_all_links(link, depth - 1, _visited)
    return links

# 在这里使用get_all_links函数获取vue动态网页的所有链接
all_links = get_all_links(base_url, 6)
# 遍历所有链接，并提取下载链接
for link in all_links:
    if "2021黑马Python6.5就业班【资料齐全】" in link.get("href"):  # 将此处的"download"替换为您要提取的下载链接的关键词
        print(link.get("href"))  # 打印下载链接
