import argparse
import hashlib
import os

import requests
from tqdm import tqdm

def download_file(url, save_dir, chunk_size=1024*1024, timeout=10):
    """
    Download a single file, resuming a partial download via HTTP Range.

    :param url: file URL
    :param save_dir: directory to save the file into
    :param chunk_size: download chunk size in bytes
    :param timeout: request timeout in seconds
    :return: True on success, False on any failure
    """
    try:
        # Derive a filename from the last URL path segment, dropping any query string.
        filename = url.split('/')[-1].split('?')[0]
        if not filename:
            # Use a stable digest, not built-in hash(): string hashing is
            # salted per process, so hash(url) changes every run and would
            # break resume for URLs without a filename.
            filename = f"download_{hashlib.md5(url.encode('utf-8')).hexdigest()}.dat"

        save_path = os.path.join(save_dir, filename)

        # Resume from an existing partial file, if any.
        resume_byte_pos = 0
        if os.path.exists(save_path):
            resume_byte_pos = os.path.getsize(save_path)
            print(f"发现部分下载，将从 {resume_byte_pos} 字节继续")

        # Ask the server for only the remaining bytes when resuming.
        headers = {"Range": f"bytes={resume_byte_pos}-"} if resume_byte_pos > 0 else {}

        with requests.get(url, headers=headers, stream=True, timeout=timeout) as r:
            # 416 Range Not Satisfiable: the local file already covers the
            # full content, so the download is effectively complete.
            if r.status_code == 416:
                print(f"下载完成: {save_path}")
                return True
            r.raise_for_status()

            # If we requested a range but got 200 (not 206), the server
            # ignored the Range header and is sending the whole file.
            # Restart from zero instead of appending a duplicate copy.
            if resume_byte_pos > 0 and r.status_code == 200:
                resume_byte_pos = 0

            # content-length is the *remaining* bytes when the server
            # honored the Range request, so add the resume offset back.
            total_size = int(r.headers.get('content-length', 0)) + resume_byte_pos

            # Append when resuming, otherwise (re)write from scratch.
            mode = 'ab' if resume_byte_pos > 0 else 'wb'
            with open(save_path, mode) as f, tqdm(
                total=total_size,
                unit='B',
                unit_scale=True,
                unit_divisor=1024,
                initial=resume_byte_pos,
                desc=filename
            ) as pbar:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    if chunk:  # skip keep-alive empty chunks
                        f.write(chunk)
                        pbar.update(len(chunk))

        # BUG FIX: the original printed the literal text "(unknown)" here
        # instead of the saved path.
        print(f"下载完成: {save_path}")
        return True

    except Exception as e:
        # Best-effort batch behavior: report the failure and let the
        # caller continue with the next URL.
        print(f"下载失败 {url}: {str(e)}")
        return False

def batch_download(url_file, save_dir):
    """Download every URL listed (one per line) in *url_file* into *save_dir*."""
    # Ensure the destination directory exists before any download starts.
    os.makedirs(save_dir, exist_ok=True)

    try:
        # Collect only the lines that look like HTTP(S) URLs; everything
        # else (blank lines, comments, garbage) is silently skipped.
        urls = []
        with open(url_file, 'r', encoding='utf-8') as fh:
            for raw_line in fh:
                candidate = raw_line.strip()
                if candidate.startswith(('http://', 'https://')):
                    urls.append(candidate)

        print(f"发现 {len(urls)} 个URL，开始下载...")

        # Download sequentially, counting how many succeed.
        success = sum(1 for link in urls if download_file(link, save_dir))

        print(f"\n下载完成！成功: {success}/{len(urls)}")

    except Exception as e:
        # Best-effort: a bad list file aborts the batch with a message.
        print(f"处理URL列表失败: {str(e)}")

if __name__ == "__main__":
    # NOTE(review): the original re-imported tqdm here to print a friendly
    # "pip install requests tqdm" hint, but the module-level
    # `from tqdm import tqdm` at the top of the file raises ImportError
    # before this block can ever run — the hint was unreachable dead code,
    # so it has been removed. To restore the hint, the top-level imports
    # themselves would need to be wrapped in try/except.
    parser = argparse.ArgumentParser(description='批量文件下载脚本')
    parser.add_argument('url_file', help='包含URL列表的文本文件（每行一个URL）')
    parser.add_argument('save_dir', help='文件保存目录')

    args = parser.parse_args()
    batch_download(args.url_file, args.save_dir)
