import threading
import requests
import os
import time
import shutil  # 用于删除非空目录
from urllib.parse import urlparse
import multiprocessing

# Shared progress state for this process's worker threads.
# All mutation happens inside update_progress under downloaded_lock.
# NOTE: with multiprocessing each child gets its own copy of these globals.
downloaded_lock = threading.Lock()
overall_downloaded = 0  # bytes downloaded so far in this process
start_time = time.time()  # re-set by download_file_process for process_id 1

def format_speed(speed):
    """Render *speed* (bytes per second) as a human-readable rate string."""
    # Largest matching unit wins; fall through to plain bytes/sec.
    units = ((1024 * 1024, "MB/s"), (1024, "KB/s"))
    for threshold, suffix in units:
        if speed >= threshold:
            return f"{speed / threshold:.2f}{suffix}"
    return f"{speed:.2f}B/s"

def format_time(seconds):
    """Render a duration in seconds as 'Xh Ym Zs' (floor division throughout)."""
    secs = seconds % 60
    minutes = (seconds // 60) % 60
    hours = seconds // 3600
    return f"{int(hours)}h {int(minutes)}m {int(secs)}s"

def update_progress(downloaded, total):
    """Add *downloaded* bytes to the global counter and redraw the progress line.

    Thread-safe via downloaded_lock. *total* is the full file size in bytes;
    callers guarantee it is non-zero (download_file_process aborts on 0).
    """
    global overall_downloaded
    with downloaded_lock:
        overall_downloaded += downloaded
        progress = overall_downloaded / total * 100
        elapsed_time = time.time() - start_time
        # Guard elapsed_time > 0 as well: the first chunk can arrive within
        # clock resolution of start_time, which would make the speed and ETA
        # divisions below raise ZeroDivisionError.
        if overall_downloaded > 0 and elapsed_time > 0:
            estimated_total_time = elapsed_time / (overall_downloaded / total)
            remaining_time = estimated_total_time - elapsed_time
            speed = overall_downloaded / elapsed_time
            print(f"\rProgress: {progress:.2f}% | Speed: {format_speed(speed)} | ETA: {format_time(remaining_time)}", end="", flush=True)
        else:
            print(f"\rProgress: {progress:.2f}% | Speed: 0.00B/s | ETA: --:--:--", end="", flush=True)

def download_chunk(url, start_byte, end_byte, filename, total_size):
    """Download bytes [start_byte, end_byte] of *url* into *filename* in place.

    *filename* must already exist and be pre-sized (it is opened 'r+b' and
    written at offset start_byte). Returns the number of bytes written, or
    0 on any failure (the error is printed, not raised).
    """
    try:
        headers = {'Range': f'bytes={start_byte}-{end_byte}'}
        # stream=True lets us write as data arrives; the timeout prevents a
        # dead connection from hanging this worker thread forever. The with
        # block also guarantees the connection is released.
        with requests.get(url, headers=headers, stream=True, timeout=30) as response:
            # Without this check an HTTP error page (404/416/...) would be
            # written into the output file as if it were file content.
            response.raise_for_status()
            downloaded_bytes = 0
            with open(filename, 'r+b') as f:
                f.seek(start_byte)
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                        downloaded_bytes += len(chunk)
                        update_progress(len(chunk), total_size)
        return downloaded_bytes
    except Exception as e:
        print(f"[ERROR] 下载块失败: {e}")
        return 0

def check_existing_file(file_path, url):
    """Inspect a local file and decide whether/where to resume the download.

    Returns:
        0   -- no usable local file; download from scratch.
        -1  -- file already complete (or larger than remote); skip download.
        n>0 -- resume from byte offset n.

    NOTE(review): download_file_process pre-allocates the file to its full
    size with truncate(), so on a restarted run file_size == online_size and
    this reports "complete" even for a partial download — confirm intent.
    """
    if not os.path.exists(file_path):
        return 0

    file_size = os.path.getsize(file_path)
    if file_size == 0:
        return 0

    # Ask the server for the full size; the timeout keeps an unresponsive
    # server from hanging the whole process on this HEAD request.
    response = requests.head(url, timeout=30)
    online_size = int(response.headers.get('Content-Length', 0))

    if file_size > online_size:
        print("本地文件大于在线文件，可能文件已经完整下载。")
        return -1
    elif file_size < online_size:
        print(f"检测到部分下载的文件，将继续下载剩余部分: {file_size}/{online_size} bytes")
        return file_size
    else:
        print("检测到完整的下载文件，如果要重新下载，请先删除现有文件。")
        return -1

def download_file_process(url, num_threads=4, process_id=1):
    """Download *url* into ./downloads/ using *num_threads* range-request threads.

    Runs once per worker process; process_id 1 performs the one-time setup
    (progress reset, directory creation, output-file pre-allocation).

    NOTE(review): every process computes the same byte ranges below, so with
    num_processes > 1 each process re-downloads the same bytes — the work is
    duplicated across processes, not divided. Confirm against main()'s intent.

    NOTE(review): the globals reset here are per-process copies (multiprocessing
    does not share module state), so the process_id == 1 guards only affect
    that one process's overall_downloaded/start_time.
    """
    global overall_downloaded, start_time
    if process_id == 1:
        overall_downloaded = 0
        start_time = time.time()

    # HEAD request to learn the total size; a missing/zero Content-Length aborts.
    response = requests.head(url)
    total_size = int(response.headers.get('Content-Length', 0))

    if total_size == 0:
        print("无法获取文件大小，下载无法进行。")
        return

    # Derive the local filename from the URL path component.
    parsed_url = urlparse(url)
    filename = os.path.basename(parsed_url.path)

    download_dir = os.path.join(os.getcwd(), "downloads")
    # Only the first process creates the directory, to avoid concurrent
    # creation conflicts between processes.
    if process_id == 1:
        os.makedirs(download_dir, exist_ok=True)
    file_path = os.path.join(download_dir, filename)

    downloaded_bytes = check_existing_file(file_path, url)
    if downloaded_bytes == -1:
        return

    if downloaded_bytes == 0 or not os.path.exists(file_path):
        # Only the first process pre-allocates the output file to full size.
        # NOTE(review): other processes may reach download_chunk's 'r+b' open
        # before this file exists — verify the intended ordering in main().
        if process_id == 1:
            with open(file_path, 'wb') as f:
                f.truncate(total_size)

    if process_id == 1:
        overall_downloaded = downloaded_bytes

    # Split the remaining bytes evenly; the last thread absorbs the remainder
    # by running through to total_size - 1.
    chunk_size = (total_size - downloaded_bytes) // num_threads

    threads = []
    for i in range(num_threads):
        start_byte = downloaded_bytes + i * chunk_size
        end_byte = downloaded_bytes + (i + 1) * chunk_size - 1 if i < num_threads - 1 else total_size - 1
        thread = threading.Thread(target=download_chunk, args=(url, start_byte, end_byte, file_path, total_size))
        threads.append(thread)
        thread.start()

    for thread in threads:
        thread.join()

    print(f"\nDownload completed by process {process_id}.")

def cleanup(download_dir="downloads"):
    """Delete *download_dir*, but only when it exists and is empty."""
    if not os.path.exists(download_dir):
        return
    try:
        if os.listdir(download_dir):
            # Keep anything that still holds files (finished or partial).
            print(f"保留非空文件夹: {download_dir}（可能包含未完成或成功的下载）")
        else:
            shutil.rmtree(download_dir)
            print(f"\n已清理空的临时文件夹: {download_dir}")
    except Exception as e:
        print(f"[ERROR] 清理临时文件夹失败: {e}")

def main():
    """Interactive entry point: read parameters, fan out worker processes, clean up."""
    url = input("请输入要下载的URL: ")
    num_threads = int(input("请输入使用的线程数: "))
    num_processes = int(input("请输入使用的进程数: "))

    workers = [
        multiprocessing.Process(
            target=download_file_process,
            args=(url, num_threads, idx + 1),
        )
        for idx in range(num_processes)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    cleanup()  # remove the downloads folder if it ended up empty

# Entry-point guard; required so multiprocessing child processes can safely
# re-import this module without re-running main().
if __name__ == "__main__":
    main()