# vlm/hf_generator_vqav2.py
import json
import os

import datasets
import yaml
from PIL import Image as PilImage
from tqdm import tqdm

# Raise PIL's decompression-bomb threshold so very large images still load.
PilImage.MAX_IMAGE_PIXELS = 11178956970

with open("./config.yaml", "r") as file:
    config = yaml.safe_load(file)
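
# Illustrative shape of config.yaml (an assumption; only `image_path` is
# read by this script, via _URLS below):
#
#   image_path: /data/coco/train2014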
def generate_new_path(old_path, prefix="COCO_train2014_"):
    """Prepend the COCO file-name prefix, e.g.
    images/000000123.jpg -> images/COCO_train2014_000000123.jpg."""
    directory, old_filename = os.path.split(old_path)
    new_filename = f"{prefix}{old_filename}"
    return os.path.join(directory, new_filename)
def read_image(image, image_dir):
    """Resolve a relative image path against image_dir and open it with PIL."""
    image_path = os.path.join(image_dir, image)
    return PilImage.open(generate_new_path(image_path))
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""
VERSION = "1.0.0"
_HOMEPAGE = ""
_LICENSE = ""
_URLS = {
"image": config["image_path"],
"json_files": "./metadata/speech_vqav2",
}
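
# Each JSON file under _URLS["json_files"] is expected to contain a list of
# records shaped roughly like the sketch below (inferred from the keys read
# in _generate_examples; the values shown are illustrative):
#
#   [
#     {
#       "image": "000000123456.jpg",      # resolved against _URLS["image"]
#       "task": "vqa",
#       "question": "What color is the bus?",
#       "answer": "blue"
#     }
#   ]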
class SpeechVQAv2(datasets.GeneratorBasedBuilder):
    """Speech VQAv2 dataset: question-answer pairs over COCO train2014 images."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="speech_vqav2",
version=VERSION,
description="speech_vqav2",
),
]
def _info(self):
        description = "Speech VQAv2: visual question answering pairs over COCO train2014 images."
features = datasets.Features(
{
"id": datasets.Value("int64"),
"question": datasets.Value("string"),
"answer": datasets.Value("string"),
"dataset": datasets.Value("string"),
"task": datasets.Value("string"),
"image": [datasets.Image()],
}
)
return datasets.DatasetInfo(
description=description,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
image_data_dir = _URLS["image"]
json_files_data_dir = _URLS["json_files"]
split_files = [
os.path.join(json_files_data_dir, f)
for f in os.listdir(json_files_data_dir)
if f.endswith('.json')
]
split_files.sort()
split_index = int(len(split_files) * 0.98)
train_data = split_files[:split_index]
val_data = split_files[split_index:]
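        # 98% of the sorted JSON shards become the train split, the remaining
        # 2% the validation split; sorting above keeps the cut deterministic.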
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": train_data,
"image_dir": image_data_dir
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": val_data,
"image_dir": image_data_dir
},
),
]
    def _generate_examples(self, filepath, image_dir):
        """Yields examples from all JSON files with globally unique IDs."""
        global_id = 0  # Keep track of the global ID across all files
        for json_file in filepath:
            file_name = os.path.basename(json_file)
            with open(json_file, "r") as file:
                data = json.load(file)
            for item in tqdm(data, desc=f"Processing {file_name}"):
                # Skip records whose image cannot be opened; PIL raises
                # OSError (including UnidentifiedImageError) on failure.
                try:
                    image = [read_image(item["image"], image_dir)]
                except OSError as exc:
                    print(f"Skipping {item['image']}: {exc}")
                    continue
                # Only yield if image loading succeeded
                yield global_id, {
                    "id": global_id,
                    "image": image,
                    "task": item["task"],
                    "dataset": "vqav2",
                    "question": item["question"],
                    "answer": item["answer"],
                }
                global_id += 1  # Increment only on a successful yield
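
# Minimal usage sketch (an assumption, not part of the builder itself): running
# this file directly builds both splits, provided config.yaml and the
# ./metadata/speech_vqav2 JSON shards exist next to it.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "./hf_generator_vqav2.py",
        name="speech_vqav2",
        trust_remote_code=True,  # script-based datasets need an explicit opt-in in recent `datasets`
    )
    print(ds)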