import requests
from bs4 import BeautifulSoup
import os
import urllib.parse
from concurrent.futures import ThreadPoolExecutor, as_completed

def get_links(url):
    """Fetch *url* and return absolute URLs for every <a href> on the page.

    The parent-directory entry ('../') is skipped.  On any request error
    an empty list is returned so the caller can keep crawling.
    """
    try:
        print("url:" + url)
        # Timeout so a single stalled server cannot hang the whole crawl.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        links = []
        for anchor in soup.find_all('a'):
            href = anchor.get('href')
            if href and href != '../':
                # Resolve relative hrefs against the page URL.
                links.append(urllib.parse.urljoin(url, href))
        return links
    except requests.RequestException as e:
        print(f"请求出错: {e}")
        return []


def download_file(url, local_path):
    """Download *url* to *local_path* if it is a primary jar/pom artifact.

    Files that already exist locally are skipped, as are -sources.jar and
    -javadoc.jar artifacts.  The response is streamed to a '.part' file
    and renamed into place on success, so an interrupted transfer never
    leaves a truncated file that the existence check would skip forever.
    """
    try:
        # Already downloaded on a previous run: skip.
        if os.path.exists(local_path):
            print(f"文件已存在，跳过下载: {local_path}")
            return

        # Only primary artifacts; sources/javadoc jars are ignored.
        if url.endswith(('jar', 'pom')) and not url.endswith(('-sources.jar', '-javadoc.jar')):
            response = requests.get(url, stream=True, timeout=30)
            response.raise_for_status()
            tmp_path = local_path + '.part'
            # Stream in chunks instead of buffering whole jars in memory.
            with open(tmp_path, 'wb') as file:
                for chunk in response.iter_content(chunk_size=65536):
                    file.write(chunk)
            # Atomic publish: only a complete file ever appears at local_path.
            os.replace(tmp_path, local_path)
            print(f"下载成功: {local_path}")

    except requests.RequestException as e:
        print(f"下载文件出错: {e}")


def crawl_directory(url, local_base_dir):
    """Recursively mirror the directory listing at *url* under *local_base_dir*.

    Directory entries (links ending in '/') are walked via an explicit
    worklist; file entries are downloaded concurrently.  Unlike a naive
    recursive version, a single thread pool is shared by the entire walk
    instead of nesting one 10-thread pool per directory level.
    """
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = []
        pending = [url]
        while pending:
            current = pending.pop()
            for link in get_links(current):
                path = urllib.parse.urlparse(link).path
                local_path = os.path.join(local_base_dir, path.lstrip('/'))
                if link.endswith('/'):
                    # Follow only true sub-directories of the current page;
                    # parent or unrelated absolute links would loop forever.
                    if link.startswith(current) and link != current:
                        os.makedirs(local_path, exist_ok=True)
                        pending.append(link)
                else:
                    os.makedirs(os.path.dirname(local_path), exist_ok=True)
                    futures.append(executor.submit(download_file, link, local_path))

        # Propagate any exception raised inside a worker.
        for future in as_completed(futures):
            future.result()


if __name__ == "__main__":
    # Mirror the Spring artifacts from Maven Central into a local folder.
    root_url = "https://repo1.maven.org/maven2/org/springframework/"
    output_dir = "downloaded_files"
    os.makedirs(output_dir, exist_ok=True)
    crawl_directory(root_url, output_dir)
    