Merge .json and fix broken .webp

#3 by zachL1

This repository currently lacks consolidated train.json, validation.json, and test.json files, and many of the images are corrupted, which makes the dataset hard to use for training. I've written two scripts that address these issues and hope they'll be helpful. (If you use them, adjust the file paths to match your setup.) 😊

The first script extracts the .zip archives for each split and merges the per-archive .json files into a single train.json / validation.json / test.json, grouping annotations by prompt_id:

import os
import zipfile
import json
from tqdm import tqdm
from collections import OrderedDict

base_dir = "data/imagereward"
for split in ["train", "test", "validation"]:
    os.makedirs(os.path.join(base_dir, "images", split), exist_ok=True)

for split in ["train", "test", "validation"]:
    source_dir = f"data/raw/ImageRewardDB/images/{split}"
    target_dir = f"data/imagereward/images/{split}"
    
    # group by prompt_id, keeping the insertion order
    grouped_data = OrderedDict()
    
    print(f"Processing {split} dataset")
    # loop over all zip files
    # unzip and merge json files
    for zip_file in tqdm(sorted(os.listdir(source_dir))):
        if zip_file.endswith('.zip'):
            zip_path = os.path.join(source_dir, zip_file)
            folder_name = zip_file[:-4]
            extract_path = os.path.join(target_dir, folder_name)
            
            os.makedirs(extract_path, exist_ok=True)
            
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                zip_ref.extractall(extract_path)
                
            json_file = os.path.join(extract_path, f"{folder_name}.json")
            if not os.path.exists(json_file):
                print(f"Warning: {json_file} does not exist")
                continue

            with open(json_file, 'r') as f:
                json_data = json.load(f)
            for item in json_data:
                prompt_id = item["prompt_id"]

                if prompt_id not in grouped_data:
                    grouped_data[prompt_id] = {
                        "prompt_id": "",
                        "prompt": "",
                        "classification": "",
                        "image_amount_in_total": "",
                        "image_path": [],
                        "rank": [],
                        "overall_rating": [],
                        "image_text_alignment_rating": [],
                        "fidelity_rating": []
                    }
                
                group = grouped_data[prompt_id]
                
                # set fixed fields
                if not group["prompt_id"]:
                    group["prompt_id"] = prompt_id
                    group["prompt"] = item["prompt"]
                    group["classification"] = item["classification"]
                    group["image_amount_in_total"] = item["image_amount_in_total"]
                else:
                    assert group["prompt"] == item["prompt"]
                    assert group["classification"] == item["classification"]
                    assert group["image_amount_in_total"] == item["image_amount_in_total"]
                # append list fields
                group["image_path"].append(item["image_path"])
                group["rank"].append(item["rank"])
                group["overall_rating"].append(item["overall_rating"])
                group["image_text_alignment_rating"].append(item["image_text_alignment_rating"])
                group["fidelity_rating"].append(item["fidelity_rating"])
    
    # convert grouped data to list
    reorganized_data = list(grouped_data.values())

    # save reorganized json
    merged_json_path = os.path.join(base_dir, f"{split}.json")
    with open(merged_json_path, 'w') as f:
        json.dump(reorganized_data, f)
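
As a quick sanity check on the merged files, something along these lines can be used to confirm that each record's per-image lists line up (a minimal sketch; the field names follow the grouped structure built above and base_dir matches my layout):

import json
import os

base_dir = "data/imagereward"
for split in ["train", "test", "validation"]:
    with open(os.path.join(base_dir, f"{split}.json")) as f:
        records = json.load(f)
    print(f"{split}: {len(records)} prompts")
    for rec in records:
        n = len(rec["image_path"])
        # each per-image list should have exactly one entry per image
        assert len(rec["rank"]) == n
        assert len(rec["overall_rating"]) == n
        assert len(rec["image_text_alignment_rating"]) == n
        assert len(rec["fidelity_rating"]) == n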

The second script identifies corrupted images and replaces them with the original images from DiffusionDB; it downloads the corresponding DiffusionDB part archives with aria2c, so aria2c needs to be installed:

import json
import os
from PIL import Image
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import requests
import zipfile
from io import BytesIO

def check_corrupt_images(json_path, parquet_path, output_json_path):
    """first stage: check and record corrupted images"""
    # if there is already a check result, load it directly
    if os.path.exists(output_json_path):
        with open(output_json_path, 'r') as f:
            return json.load(f)
    
    # initialize corrupted images dictionary, key is part_id, value is {image info list, repair status}
    corrupt_images = {}
    
    # read the merged json and the diffusiondb metadata (maps image_name -> part_id)
    with open(json_path, 'r') as f:
        data = json.load(f)
    df = pd.read_parquet(parquet_path)
    
    # check corrupted images
    for item in tqdm(data, desc="check corrupted images"):
        for img_path in item['image_path']:
            full_path = os.path.join(base_dir, img_path)
            try:
                img = Image.open(full_path)
                img.verify()
            except (IOError, SyntaxError, FileNotFoundError):
                # if image is corrupted, find the part_id in diffusiondb
                filename = Path(full_path).name
                match = df[df['image_name'] == filename]
                if not match.empty:
                    part_id = int(match['part_id'].iloc[0])
                    if part_id not in corrupt_images:
                        corrupt_images[part_id] = {
                            'images': [],
                            'fixed': False
                        }
                    corrupt_images[part_id]['images'].append({
                        'full_path': full_path,
                        'filename': filename
                    })
                else:
                    print(f"Warning: {full_path} does not exist in diffusiondb")
    
    # save check result
    with open(output_json_path, 'w') as f:
        json.dump(corrupt_images, f, indent=2)
    
    return corrupt_images

def fix_corrupt_images(corrupt_images_dict, output_json_path):
    """second stage: fix corrupted images"""
    os.makedirs('./tmp', exist_ok=True)
    
    # loop over parts that are not fixed
    for part_id, info in tqdm(corrupt_images_dict.items(), desc="fix corrupted images"):
        # skip fixed parts
        if info['fixed']:
            continue
            
        # download corresponding zip file from diffusiondb
        part_id = int(part_id)
        zip_url = f'{hf_site}/datasets/poloclub/diffusiondb/resolve/main/diffusiondb-large-part-1/part-{part_id:06}.zip'
        temp_zip = f'./tmp/part-{part_id:06}.zip'
        try:
            # response = requests.get(zip_url, stream=True)
            # response.raise_for_status()
            # total_size = int(response.headers.get('content-length', 0))
            # # show download progress
            # block_size = 1024  # 1KB
            # temp_zip = b""
            # with tqdm(total=total_size, unit='iB', unit_scale=True, desc=f"downloading part-{part_id:06}.zip") as pbar:
            #     for data in response.iter_content(block_size):
            #         temp_zip += data
            #         pbar.update(len(data))

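            # aria2c must be installed and on PATH; -c resumes a partially downloaded archive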
            cmd = f'aria2c -c "{zip_url}" -d ./tmp -o "part-{part_id:06}.zip"'
            if os.system(cmd) != 0:
                raise Exception("aria2c download failed")
            
            # extract and replace corrupted images from zip file
            with zipfile.ZipFile(temp_zip) as zip_ref:
                for img_info in info['images']:
                    full_path = img_info['full_path']
                    filename = img_info['filename']
                    # ensure target directory exists
                    os.makedirs(os.path.dirname(full_path), exist_ok=True)
                    # extract image from zip file and save
                    with zip_ref.open(filename) as source:
                        with open(full_path, 'wb') as target:
                            target.write(source.read())
                            
            os.remove(temp_zip)

            # mark this part_id as fixed
            info['fixed'] = True
            # save updated status
            with open(output_json_path, 'w') as f:
                json.dump(corrupt_images_dict, f, indent=2)
                
        except Exception as e:
            print(f"error when processing part_id {zip_url}: {str(e)}")
            continue
    
    return corrupt_images_dict


base_dir = "data/imagereward"
hf_site = "https://hf-mirror.com" # I use a mirror site instead of huggingface.co

# first stage: check corrupted images
output_json_path = os.path.join(base_dir, 'corrupt_images.json')
corrupt_images = check_corrupt_images(
    os.path.join(base_dir, 'train.json'),
    'data/raw/metadata-large.parquet', # {hf_site}/datasets/poloclub/diffusiondb/resolve/main/metadata-large.parquet
    output_json_path
)

# second stage: fix corrupted images
fixed_results = fix_corrupt_images(corrupt_images, output_json_path)

# print results
for part_id, info in fixed_results.items():
    if not info['fixed']:
        print(f"Part ID {part_id}: {len(info['images'])} corrupted images - not fixed")
