import requests
import os
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm
import time  # used to time the download and merge phases

def download_chunk(url, start_byte, end_byte, chunk_path, progress_callback):
    """Download the inclusive byte range [start_byte, end_byte] of *url* into *chunk_path*.

    Args:
        url: Source URL; the server must honor HTTP Range requests.
        start_byte: First byte of the range (inclusive).
        end_byte: Last byte of the range (inclusive).
        chunk_path: Local file path the range is written to (overwritten).
        progress_callback: Called with the number of bytes just written after
            each network chunk, so the caller can advance a shared progress bar.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    headers = {"Range": f"bytes={start_byte}-{end_byte}"}
    # Stream the body and close the connection deterministically; the timeout
    # keeps a stalled server from hanging this worker thread forever.
    with requests.get(url, headers=headers, stream=True, timeout=30) as response:
        # Fail loudly on HTTP errors instead of writing an error page into the part file.
        response.raise_for_status()
        with open(chunk_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
                    progress_callback(len(chunk))

def multithread_download(url, file_path, num_threads=8):
    """Download *url* to *file_path* with *num_threads* parallel ranged requests.

    Each worker fetches one contiguous byte range into ``{file_path}.partN``;
    the parts are then concatenated into *file_path* in order and deleted.

    Args:
        url: Source URL; the server must report Content-Length and honor Range.
        file_path: Destination path; its directory is created if needed.
        num_threads: Number of parallel range requests (default 8).

    Returns:
        file_path, once the merged file is complete.

    Raises:
        ValueError: if the server does not report a usable Content-Length
            (the file cannot be split into byte ranges without it).
        requests.HTTPError: if the size probe fails.
    """
    # Probe the size only; follow redirects so the final resource is measured,
    # and bound the wait with a timeout.
    response = requests.head(url, allow_redirects=True, timeout=30)
    response.raise_for_status()
    total_size = int(response.headers.get("Content-Length", 0))
    if total_size <= 0:
        # With no known size the range arithmetic below would be nonsense.
        raise ValueError("Server did not report Content-Length; cannot split the download")

    # Create the target directory; dirname is empty for a bare filename,
    # and os.makedirs("") would raise.
    target_dir = os.path.dirname(file_path)
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)

    chunk_size = total_size // num_threads
    chunk_paths = []

    print("开始多线程下载，进度条如下>>>>>>>>")
    start_time = time.time()
    with tqdm(total=total_size, unit='B', unit_scale=True, desc=file_path) as pbar:
        with ThreadPoolExecutor(max_workers=num_threads) as executor:
            futures = []
            for i in range(num_threads):
                start_byte = i * chunk_size
                # The last worker also takes the division remainder.
                end_byte = start_byte + chunk_size - 1 if i < num_threads - 1 else total_size - 1
                chunk_path = f"{file_path}.part{i}"
                chunk_paths.append(chunk_path)
                # tqdm.update acquires an internal lock, so the bound method is
                # safe to share across worker threads directly.
                futures.append(executor.submit(
                    download_chunk, url, start_byte, end_byte, chunk_path, pbar.update))

            # Re-raise any worker exception instead of merging incomplete parts.
            for future in futures:
                future.result()

    download_time = time.time() - start_time
    print(f"多线程下载完成，耗时：{download_time:.2f}秒")

    # Merge the part files in order, deleting each one after it is appended.
    print("开始合并！！！请稍等>>>>>>>>")
    start_time = time.time()

    total_chunk_size = sum(os.path.getsize(chunk_path) for chunk_path in chunk_paths)

    with open(file_path, "wb") as f:
        with tqdm(total=total_chunk_size, unit='B', unit_scale=True, desc="合并进度") as pbar:
            for chunk_path in chunk_paths:
                with open(chunk_path, "rb") as chunk:
                    # Copy in bounded blocks so a large part file is never
                    # loaded into memory all at once.
                    for block in iter(lambda: chunk.read(1024 * 1024), b""):
                        f.write(block)
                        pbar.update(len(block))
                os.remove(chunk_path)

    merge_time = time.time() - start_time
    print(f"合并完成，耗时：{merge_time:.2f}秒")

    return file_path

if __name__ == "__main__":

    # 1: direct download link for the dataset archive
    url = 'https://www.modelscope.cn/datasets/DiffSynth-Studio/ImagePulse-ChangeAddRemove/resolve/master/data/1744012814093745268.tar.gz'

    # 2: derive the local target path from the URL's final path segment
    file_name = url.split('/')[-1]
    file_path = os.path.join(r"./datasets/", file_name)

    # 3: worker count (tune to your machine; roughly 2x CPU cores is a good start)
    num_threads = 8

    # 4: run the download
    multithread_download(url, file_path, num_threads)