#!/usr/bin/env python3
import csv
import os
import subprocess
from urllib.parse import urlparse

def get_filename_from_url(url):
    """Return the last path component of *url* (query/fragment excluded).

    May return an empty string when the URL path ends with '/'.
    """
    parsed = urlparse(url)
    filename = os.path.basename(parsed.path)
    return filename

def download_file(url, output_dir):
    """Download *url* into *output_dir* using the external ``wget`` binary.

    The target filename is derived from the URL path. If the file already
    exists the download is skipped. On failure any partially downloaded
    file is removed so a rerun will retry cleanly.

    Returns True on success (or skip), False on failure.
    """
    filename = get_filename_from_url(url)
    output_path = os.path.join(output_dir, filename)

    # Skip the download if the file is already present.
    if os.path.exists(output_path):
        # BUGFIX: the f-string previously printed the literal text
        # "(unknown)" instead of interpolating the path.
        print(f"文件已存在，跳过: {output_path}")
        return True

    try:
        print(f"正在下载: {url}")
        # Download via wget with a connect timeout and a few retries.
        cmd = [
            'wget',
            '--timeout=30',
            '--tries=3',
            '--retry-connrefused',
            '--no-check-certificate',  # some hosts require skipping cert checks
            '-O', output_path,
            url
        ]
        subprocess.run(cmd, check=True)
        # BUGFIX: same clobbered interpolation as above.
        print(f"下载成功: {filename}")
        return True
    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        # FileNotFoundError: wget binary not installed — count as a normal
        # failure instead of aborting the whole run.
        print(f"下载失败: {url}")
        print(f"错误: {str(e)}")
        # Remove a possibly partial download left behind by wget -O.
        if os.path.exists(output_path):
            os.remove(output_path)
        return False

def main():
    """Download every URL listed in ``archive_urls.csv``.

    The CSV (UTF-8) must have a ``URL`` column. Files go into the
    ``downloads_deps_tar`` directory; a success/failure summary is
    printed at the end.
    """
    # Create the download directory (no-op if it already exists).
    download_dir = "downloads_deps_tar"
    os.makedirs(download_dir, exist_ok=True)

    success_count = 0
    fail_count = 0

    # newline='' per the csv module docs so quoted fields with embedded
    # newlines are parsed correctly.
    with open('archive_urls.csv', 'r', encoding='utf-8', newline='') as f:
        reader = csv.DictReader(f)
        for row in reader:
            # ROBUSTNESS: blank trailing rows or empty URL cells used to
            # raise KeyError / hand an empty string to wget — skip them.
            url = (row.get('URL') or '').strip()
            if not url:
                continue
            if download_file(url, download_dir):
                success_count += 1
            else:
                fail_count += 1

    print("\n下载统计:")
    print(f"成功: {success_count}")
    print(f"失败: {fail_count}")
    print(f"总计: {success_count + fail_count}")

if __name__ == "__main__":
    main()