# pybullet_img2img / pybullet_img2img.py
import os
from xml.etree import ElementTree as ET
import datasets
_DESCRIPTION = """\
Paired image-to-image (img2img) data: each example contains an original image,
a text edit prompt, and the corresponding edited image.
"""
_NAME = "pybullet_img2img"
_HOMEPAGE = f"https://huggingface.co/datasets/pranjalipathre/{_NAME}"
_LICENSE = ""
_DATA = f"https://huggingface.co/datasets/pranjalipathre/{_NAME}/resolve/main/data/"
# _DATA = f"/home/pranjali/Documents/Research/pybullet/img2img/{_NAME}/data/"
class i2iDataset(datasets.GeneratorBasedBuilder):
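    """Builder for paired (original image, edit prompt, edited image) examples.

    Each config points at one zip archive under `data/` on the Hub (see `_DATA`).
    After extraction the archive is expected to contain `original_images/`,
    `edited_images/`, and an `annotations.xml` with per-frame edit prompts
    (see `parse_text` and `_generate_examples` below).
    """
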
    BUILDER_CONFIGS = [
        # sapien-doors sequential dataset. - 5.6k
        datasets.BuilderConfig(name="video_00", data_dir=f"{_DATA}video_00.zip"),
        # sapien-doors dataset with domain randomization. - 26.4k
        datasets.BuilderConfig(name="video_01", data_dir=f"{_DATA}video_01.zip"),
        # sapien-all_objects dataset. - 155k
        datasets.BuilderConfig(name="video_02", data_dir=f"{_DATA}video_02.zip"),
        # sapien-bottles dataset. - 12.3k
        datasets.BuilderConfig(name="video_03", data_dir=f"{_DATA}video_03.zip"),
        # sapien dataset. - 2.6k
        datasets.BuilderConfig(name="video_04", data_dir=f"{_DATA}video_04.zip"),
        # sapien dataset. - 0.8k
        datasets.BuilderConfig(name="video_05", data_dir=f"{_DATA}video_05.zip"),
        # sapien door (open and closed) trajectories of a scene. - 22.0k
        datasets.BuilderConfig(name="video_06", data_dir=f"{_DATA}video_06.zip"),
        # sapien door (open) trajectories of a scene. - 17.7k
        datasets.BuilderConfig(name="video_07", data_dir=f"{_DATA}video_07.zip"),
        # rlbench trajectories of a scene. - 7.637k
        datasets.BuilderConfig(name="video_08", data_dir=f"{_DATA}video_08.zip"),
        # habitat ReplicaCAD door (open) trajectories of a scene. - 8.2k
        datasets.BuilderConfig(name="video_09", data_dir=f"{_DATA}video_09.zip"),
        # rlbench place sorter trajectories of a scene. - 19k
        datasets.BuilderConfig(name="video_10", data_dir=f"{_DATA}video_10.zip"),
        # rlbench window sorter trajectories of a scene. - 5k
        datasets.BuilderConfig(name="video_11", data_dir=f"{_DATA}video_11.zip"),
        # habitat mp3d door trajectories of a scene. - 9k
        datasets.BuilderConfig(name="video_12", data_dir=f"{_DATA}video_12.zip"),
        # rlbench charger trajectories of a scene. - 4k
        datasets.BuilderConfig(name="video_13", data_dir=f"{_DATA}video_13.zip"),
        # rlbench door trajectories of a scene. - 4k
        datasets.BuilderConfig(name="video_14", data_dir=f"{_DATA}video_14.zip"),
        # real-world circle sorter trajectories - 4k
        datasets.BuilderConfig(name="video_15", data_dir=f"{_DATA}video_15.zip"),
        # real-world hexagon sorter trajectories - 4k
        datasets.BuilderConfig(name="video_16", data_dir=f"{_DATA}video_16.zip"),
        # real-world square sorter trajectories - 4k
        datasets.BuilderConfig(name="video_17", data_dir=f"{_DATA}video_17.zip"),
        # real-world shape stacker trajectories - 900
        datasets.BuilderConfig(name="video_18", data_dir=f"{_DATA}video_18.zip"),
        # unity warehouse trajectories - 110
        datasets.BuilderConfig(name="video_19", data_dir=f"{_DATA}video_19.zip"),
    ]

    DEFAULT_CONFIG_NAME = "video_13"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "original_image": datasets.Image(),
                    "edit_prompt": datasets.Value("string"),
                    "edited_image": datasets.Image(),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=None,
        )

    def _split_generators(self, dl_manager):
        data = dl_manager.download_and_extract(self.config.data_dir)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data": data,
                },
            ),
        ]

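    # The XPath query in `parse_text` below implies an annotation layout along
    # these lines (a sketch inferred from the query, not taken from the repo):
    #
    #   <annotations>
    #     <image frame="000000" text="open the door" ... />
    #     <image frame="000001" text="open the door" ... />
    #   </annotations>
    #
    # i.e. any element carrying a zero-padded `frame` attribute and a `text`
    # attribute holding the edit prompt for that frame. The element tag and the
    # example prompt above are placeholders.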
    @staticmethod
    def parse_text(root: ET.Element, file: str, index: int) -> dict:
        idx = str(index).zfill(6)
        ele = root.find(f".//*[@frame='{idx}']")
        dt = {
            "text": ele.get("text")
        }
        return dt

    def _generate_examples(self, data):
        treePath = os.path.join(data, "annotations.xml")
        tree = ET.parse(treePath)
        root = tree.getroot()
        for idx, file in enumerate(sorted(os.listdir(os.path.join(data, "original_images")))):
            dat = self.parse_text(root, file, idx)
            txt = dat["text"]
            yield idx, {
                "original_image": os.path.join(data, "original_images", file),
                "edit_prompt": txt,
                "edited_image": os.path.join(data, "edited_images", file),
            }
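

if __name__ == "__main__":
    # Minimal usage sketch: load one config of this dataset from the Hub repo
    # named in _HOMEPAGE. The config name and split follow the definitions
    # above; `trust_remote_code=True` may be required on newer `datasets`
    # releases because this is a script-based dataset.
    ds = datasets.load_dataset(
        f"pranjalipathre/{_NAME}",
        name="video_13",
        split="train",
        trust_remote_code=True,
    )
    example = ds[0]
    print(example["edit_prompt"])       # str
    print(example["original_image"])    # PIL image
    print(example["edited_image"])      # PIL image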