import json
import os
import requests
from datasets import load_dataset
from concurrent.futures import ThreadPoolExecutor, as_completed

download_threads = 10  # adjust the thread count to the actual environment

# Metadata records whose image download succeeded (filled by the pool below).
all_res = []
# Load every raw parquet shard as one dataset (split name will be "train").
ds = load_dataset("parquet", data_files="humancap-hq-311k-raw/*.parquet")
print(ds)


def download_image(url: str, path: str):
    """Download ``url`` to ``path``; return ``path`` on success, ``None`` on failure.

    If ``path`` already exists it is assumed complete and returned as-is.
    The payload is streamed into a temporary ``.part`` file and atomically
    renamed into place, so an interrupted or failed download can never leave
    a partial file at ``path`` that a later run would mistake for a finished
    download (the original wrote directly to ``path``, which had exactly
    that bug).
    """
    if os.path.exists(path):
        return path
    tmp_path = path + ".part"
    try:
        with requests.get(url, stream=True, timeout=10) as r:
            r.raise_for_status()
            with open(tmp_path, "wb") as f:
                for chunk in r.iter_content(8192):
                    f.write(chunk)
        # Atomic rename: only fully-written files ever appear at `path`.
        os.replace(tmp_path, path)
        return path
    except requests.exceptions.RequestException as e:
        print(f"Download failed for {url}: {e}")
        # Drop the partial file so a retry starts clean.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        return None


def build_item(item, idx):
    """Turn one raw dataset record into a ``(record, url, target_path)`` triple.

    ``record`` is the JSON-ready metadata dict, ``url`` is the source image
    URL, and ``target_path`` is where the image should be saved on disk.
    ``idx`` is accepted for signature compatibility but unused: the output id
    is derived from the record's own ``image_id``.
    """
    image_rel = f"images/{item['image_id']}.jpg"
    record = {
        "id": f"{item['image_id']}",
        "image": image_rel,
        "conversations": item["human_caption_hq"],
    }
    out_root = "humancap-hq-311k/"
    # Ensure the images directory exists before the caller downloads into it.
    os.makedirs(os.path.join(out_root, "images"), exist_ok=True)
    return record, item["url"], os.path.join(out_root, image_rel)


# Scan the dataset, building download jobs and metadata records for the
# first 60% of the rows.
items = []      # (record, url, target_path) jobs for the download pool
res_fake = []   # metadata written optimistically before downloading
total = len(ds["train"])
for idx, item in enumerate(ds["train"]):
    record, url, path = build_item(item, f"hc-hq-{idx}")
    items.append((record, url, path))
    res_fake.append(record)
    done = idx + 1
    if done > total * 0.6:
        break
    print(f"\rscan {done}/{total}", end="", flush=True)

# Write the full metadata list up front, before downloading.
file_path = "humancap-hq-311k.json"
# encoding="utf-8" is required here: ensure_ascii=False emits raw non-ASCII
# characters (e.g. Chinese caption text), which raises UnicodeEncodeError on
# platforms whose default file encoding is not UTF-8.
with open(file_path, "w", encoding="utf-8") as f:
    json.dump(res_fake, f, ensure_ascii=False, indent=2)
print("start download...")

# Fan the downloads out across a thread pool and keep the metadata of every
# record whose image ends up on disk (freshly downloaded or already present).
with ThreadPoolExecutor(max_workers=download_threads) as pool:
    pending = {
        pool.submit(download_image, url, path): (record, path)
        for record, url, path in items
    }

    for fut in as_completed(pending):
        record, path = pending[fut]
        # download_image returns the file path (truthy) on success or when
        # the file already existed, and None on failure.
        if fut.result():
            all_res.append(record)
        print(f"\rProcessed {len(all_res)}/{len(items)}", end="", flush=True)

print(f"\nAll results: {len(all_res)}")
# Overwrite the metadata JSON with only the successfully-downloaded records,
# replacing the optimistic full list written before the downloads started.
file_path = "humancap-hq-311k.json"
# encoding="utf-8" is required: ensure_ascii=False emits raw non-ASCII text,
# which fails on platforms with a non-UTF-8 default file encoding.
with open(file_path, "w", encoding="utf-8") as f:
    json.dump(all_res, f, ensure_ascii=False, indent=2)
