import requests
from bs4 import BeautifulSoup
import os
from urllib.parse import urljoin

# URL of the page to scrape (replace with your target page).
web_url = "https://example.com/downloads"  # placeholder, must be replaced

# Directory where downloaded files are saved.
save_dir = "C:/downloads"
# exist_ok avoids the check-then-create race of os.path.exists + os.makedirs.
os.makedirs(save_dir, exist_ok=True)

try:
    # Fetch the page; 30s matches the per-file download timeout below
    # (the original 100s was excessive for a single HTML page).
    response = requests.get(web_url, timeout=30)
    response.raise_for_status()  # raise on HTTP error status
    soup = BeautifulSoup(response.text, 'html.parser')

    # File extensions considered downloadable (extend as needed).
    file_extensions = ('.zip', '.rar', '.pdf', '.exe', '.docx', '.xlsx', '.jpg', '.png')

    download_links = []
    seen = set()  # dedupe: the same file linked twice is downloaded once
    for link in soup.find_all('a'):
        href = link.get('href')
        if not href:
            continue
        # Resolve relative hrefs against the page URL.
        absolute_url = urljoin(web_url, href)
        # Strip fragment/query before matching so "file.zip?v=2" is found,
        # and compare case-insensitively so ".PDF" is not missed.
        path_only = absolute_url.split('#', 1)[0].split('?', 1)[0]
        if path_only.lower().endswith(file_extensions) and absolute_url not in seen:
            seen.add(absolute_url)
            download_links.append(absolute_url)

    print(f"找到 {len(download_links)} 个可下载链接，开始下载...")

    # Download each file in turn; a failure on one does not stop the batch.
    for i, url in enumerate(download_links, 1):
        try:
            # Derive the local file name from the URL path, ignoring any
            # query string or fragment.
            file_name = os.path.basename(url.split('#', 1)[0].split('?', 1)[0])
            save_path = os.path.join(save_dir, file_name)

            # Stream the body to disk in chunks to bound memory use.
            with requests.get(url, stream=True, timeout=30) as r:
                r.raise_for_status()
                with open(save_path, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            print(f"已下载 {i}/{len(download_links)}: {file_name}")

        except Exception as e:
            # Best-effort batch: report the failure and move on.
            print(f"下载失败 {url}: {str(e)}")

except Exception as e:
    print(f"获取网页失败: {str(e)}")

print("下载任务结束")