import os
import pandas as pd
import requests
from urllib.parse import urljoin
# Download helper
def download_file(url, save_path):
    """Download the resource at *url* and write it to *save_path*.

    Raises requests.HTTPError on a non-2xx response and propagates
    connection/timeout errors, so callers can skip bad links instead of
    silently saving an HTML error page with a ``.pdf`` name.
    """
    # Stream the body so large PDFs are not held fully in memory;
    # the timeout prevents the whole batch hanging on one dead server.
    response = requests.get(url, stream=True, timeout=30)
    response.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=65536):
            f.write(chunk)
# Download every PDF referenced anywhere in the sheet's cells
def download_all(rows, save_directory: str):
    """Find every ``https://...pdf`` URL in *rows* and download each one
    into *save_directory*.

    rows: nested sequence of cell values (e.g. ``DataFrame.values.tolist()``);
          a single string cell may contain several URLs concatenated together.
    """
    count = 0
    for row in rows:
        for cell in row:
            # pandas yields NaN (a float) for empty cells, and cells may hold
            # numbers/dates — only strings can contain URLs.
            if not isinstance(cell, str):
                continue
            # A cell may concatenate several URLs; split on the scheme so each
            # fragment is at most one URL (minus its "https://" prefix).
            for fragment in cell.split("https://"):
                pdf_pos = fragment.find(".pdf")
                # Skip fragments that are not PDF links.
                if pdf_pos == -1:
                    continue
                # Re-attach the scheme and drop anything after ".pdf"
                # (query strings, trailing text from the cell, etc.).
                url = "https://" + fragment[: pdf_pos + 4]
                file_name = url.split("/")[-1]
                try:
                    download_file(url, os.path.join(save_directory, file_name))
                except requests.RequestException as exc:
                    # One dead link must not abort the rest of the batch.
                    print(f"Failed to download {url}: {exc}")
                    continue
                count += 1
                # Progress message (kept verbatim for downstream log parsing).
                print(f"正在下载目录{save_directory}中的第{count}个pdf文件,url == {url}")
    print(count)
# Read every sheet of the workbook in one pass: sheet_name=None makes
# read_excel return a {sheet_name: DataFrame} mapping, so there is no need
# to re-open the file once per sheet as before.
excel_file = "./read_file_nema.xlsx"
sheets = pd.read_excel(excel_file, engine="openpyxl", sheet_name=None)
for sheet_name, sheet_df in sheets.items():
    # One output folder per sheet, created next to the script.
    save_directory = os.path.join("./", sheet_name)
    os.makedirs(save_directory, exist_ok=True)
    # Flatten the sheet into plain rows of cell values and download
    # every PDF URL found in them.
    rows = sheet_df.values.tolist()
    download_all(rows, save_directory)
