import os

import requests
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

'''
获取网页的子链接，并下载离线html
'''
def get_links(url, driver_path=r'D:\App\chromedriver-win64\chromedriver.exe'):
    """Collect the href of every <a> element on the page at *url*.

    Args:
        url: Page to open in a Chrome session.
        driver_path: Location of the chromedriver binary; exposed as a
            parameter so the previously hard-coded path can be overridden.

    Returns:
        List of href strings. Anchors without an href attribute
        (get_attribute returns None for those) are skipped.
    """
    service = Service(driver_path)
    # Chrome() starts the service itself; an explicit service.start() here
    # would leave a second, orphaned chromedriver process behind.
    driver = Chrome(service=service)
    try:
        driver.get(url)
        hrefs = [a.get_attribute('href')
                 for a in driver.find_elements(By.TAG_NAME, 'a')]
        # Drop anchors that have no href at all.
        links = [h for h in hrefs if h is not None]
    finally:
        # Always release the browser, even if page load or scraping fails.
        driver.quit()

    return links


def download_html(url, folder, session):
    """Download *url* via *session* and save the raw bytes into *folder*.

    The file name is the last path segment of the URL. URLs ending in '/'
    have an empty last segment, which previously made the code try to open
    the folder itself as a file; such URLs now fall back to the segment
    before the trailing slash (or 'index.html' as a last resort).

    Args:
        url: Absolute URL to fetch.
        folder: Existing directory to write the file into.
        session: requests.Session-like object providing .get(url).
    """
    response = session.get(url)
    # rstrip('/') is a no-op for normal URLs, so existing names are unchanged.
    name = url.rstrip('/').split('/')[-1] or 'index.html'
    filename = os.path.join(folder, name)
    with open(filename, 'wb') as f:
        f.write(response.content)


def download_all_links(urls, folder, session):
    """Save an offline copy of every page in *urls* into *folder*.

    Args:
        urls: Iterable of page URLs to fetch.
        folder: Destination directory for the saved files.
        session: Authenticated requests session shared by all downloads.
    """
    for page_url in urls:
        download_html(page_url, folder, session)


def main():
    """Scrape the category page and mirror every linked page locally."""
    # Page whose outgoing links we want to mirror.
    url = 'http://svip.iocoder.cn/categories/MyBatis'
    # Destination directory for the offline copies.
    folder = r'E:\资料-学习\1.信息技术资料\芋道\源码系列\《MyBatis 源码解析》'

    # Make sure the destination directory exists before writing into it.
    if not os.path.exists(folder):
        os.makedirs(folder)

    # One authenticated HTTP session, reused across all downloads.
    session = requests.Session()
    session.auth = ('yudao', 'jiankan')

    # Collect every link on the page and show what was found.
    links = get_links(url)
    print("获取到的链接：")
    for link in links:
        print(link)

    # Mirror each linked page into the destination folder.
    download_all_links(links, folder, session)


# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()
