"""
读取download_urls_only.json, 读取所有的download_url, 下载到本地
"""

import json
import os
import subprocess
import tqdm
from urllib.parse import unquote


# Download directory and progress-record file settings.
DOWNLOAD_DIR = "/mnt/d/Proj1126Files"
DONE_FILE = "done.json"
BATCH_SIZE = 10  # flush completed IDs to disk after this many successful downloads
TIMEOUT = 30  # per-download timeout in seconds

# Ensure the download directory exists.
os.makedirs(DOWNLOAD_DIR, exist_ok=True)

# Load the set of attach_ids that have already been downloaded.
done_attach_ids = set()
if os.path.exists(DONE_FILE):
    try:
        with open(DONE_FILE, "r", encoding="utf-8") as f:
            done_attach_ids = set(json.load(f))
    except (OSError, json.JSONDecodeError):
        # Narrowed from a bare `except:` — only I/O and parse failures are expected here.
        print(f"读取{DONE_FILE}失败，将创建新文件")

# Load the list of pending downloads.
with open("download_urls_only.json", "r", encoding="utf-8") as f:
    download_urls = json.load(f)

# Skip items that were already downloaded in a previous run.
remaining_downloads = [item for item in download_urls if item["attach_id"] not in done_attach_ids]


def _flush_done() -> None:
    """Persist the current set of completed attach_ids to DONE_FILE."""
    with open(DONE_FILE, "w", encoding="utf-8") as f:
        json.dump(list(done_attach_ids), f, ensure_ascii=False, indent=2)


# Download each item; flush progress to DONE_FILE every BATCH_SIZE successes.
download_count = 0
for item in tqdm.tqdm(remaining_downloads, desc="Downloading"):
    url = item["download_url"]
    # Derive the local filename: prefer the percent-decoded "?n=" query value,
    # otherwise fall back to the last path segment of the URL.
    if "?n=" in url:
        filename = unquote(url.split("?n=")[1])
    else:
        filename = url.split("/")[-1]
    try:
        # BUG FIX: the original command hard-coded the literal output name
        # "(unknown)" and never used `filename`. Also pass an argument list
        # with shell=False so URLs/filenames containing shell metacharacters
        # (&, ?, spaces, quotes) are not interpreted by a shell.
        result = subprocess.run(
            ["wget", "-O", filename, url],
            cwd=DOWNLOAD_DIR,
            timeout=TIMEOUT,
        )
    except subprocess.TimeoutExpired:
        # Leave this item unmarked so it is retried on the next run.
        continue
    if result.returncode != 0:
        # wget failed — do not record the ID as done, so it will be retried.
        continue
    # Mark as done only AFTER a successful download (the original marked
    # items done before running wget, losing failed items forever).
    done_attach_ids.add(item["attach_id"])
    download_count += 1
    if download_count >= BATCH_SIZE:
        _flush_done()
        download_count = 0

# Flush the final partial batch so progress is never lost
# (the original dropped up to BATCH_SIZE-1 trailing IDs).
if download_count:
    _flush_done()