MSCOCO / MSCOCO.py
shunk031's picture
Initialize (#1)
1d9dac8 unverified
raw history blame
No virus
29.1 kB
import json
import logging
import os
from collections import defaultdict
from dataclasses import asdict, dataclass
from typing import (
Any,
Dict,
Final,
Iterator,
List,
Literal,
Optional,
Sequence,
Tuple,
TypedDict,
Union,
get_args,
)
import datasets as ds
import numpy as np
from datasets.data_files import DataFilesDict
from PIL import Image
from PIL.Image import Image as PilImage
from pycocotools import mask as cocomask
from tqdm.auto import tqdm
logger = logging.getLogger(__name__)
# Type aliases describing the raw COCO JSON structures handled below.
JsonDict = Dict[str, Any]
ImageId = int
AnnotationId = int
LicenseId = int
CategoryId = int
# A 4-float bounding box; COCO's convention is (x, y, width, height) —
# not enforced anywhere in this script.
Bbox = Tuple[float, float, float, float]
MscocoSplits = Literal["train", "val", "test"]
# Maps the COCO keypoint visibility flag ``v`` (used as a list index) to a
# readable state: 0 -> "unknown", 1 -> "invisible", 2 -> "visible".
KEYPOINT_STATE: Final[List[str]] = ["unknown", "invisible", "visible"]
# Dataset card fields; intentionally left blank in this script.
_CITATION = """
"""
_DESCRIPTION = """
"""
_HOMEPAGE = """
"""
_LICENSE = """
"""
# Official download URLs keyed by release year. "images" entries are zip
# archives of JPEGs per split; "annotations" entries are zip archives of the
# per-task JSON files (captions / instances / person_keypoints / image info).
_URLS = {
    "2014": {
        "images": {
            "train": "http://images.cocodataset.org/zips/train2014.zip",
            "validation": "http://images.cocodataset.org/zips/val2014.zip",
            "test": "http://images.cocodataset.org/zips/test2014.zip",
        },
        "annotations": {
            "train_validation": "http://images.cocodataset.org/annotations/annotations_trainval2014.zip",
            "test_image_info": "http://images.cocodataset.org/annotations/image_info_test2014.zip",
        },
    },
    "2015": {
        "images": {
            "test": "http://images.cocodataset.org/zips/test2015.zip",
        },
        "annotations": {
            "test_image_info": "http://images.cocodataset.org/annotations/image_info_test2015.zip",
        },
    },
    "2017": {
        "images": {
            "train": "http://images.cocodataset.org/zips/train2017.zip",
            "validation": "http://images.cocodataset.org/zips/val2017.zip",
            "test": "http://images.cocodataset.org/zips/test2017.zip",
            "unlabeled": "http://images.cocodataset.org/zips/unlabeled2017.zip",
        },
        "annotations": {
            "train_validation": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip",
            "stuff_train_validation": "http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip",
            "panoptic_train_validation": "http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip",
            "test_image_info": "http://images.cocodataset.org/annotations/image_info_test2017.zip",
            "unlabeled": "http://images.cocodataset.org/annotations/image_info_unlabeled2017.zip",
        },
    },
}
@dataclass
class AnnotationInfo(object):
    """The ``info`` header of a COCO annotation file."""

    description: str
    url: str
    version: str
    year: str
    contributor: str
    date_created: str

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "AnnotationInfo":
        """Build an ``AnnotationInfo`` from the raw ``info`` dict.

        The JSON keys match the field names one-to-one, so the dict is
        expanded directly into the constructor.
        """
        return cls(**json_dict)
@dataclass
class LicenseData(object):
    """One entry of the ``licenses`` list in a COCO annotation file."""

    url: str
    license_id: LicenseId
    name: str

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "LicenseData":
        """Build a ``LicenseData`` from a raw license dict.

        COCO stores the identifier under the key ``"id"``; it is exposed
        here as ``license_id``.
        """
        return cls(
            url=json_dict["url"],
            name=json_dict["name"],
            license_id=json_dict["id"],
        )
@dataclass
class ImageData(object):
    """One entry of the ``images`` list in a COCO annotation file."""

    image_id: ImageId
    license_id: LicenseId
    file_name: str
    coco_url: str
    height: int
    width: int
    date_captured: str
    flickr_url: str

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "ImageData":
        """Build an ``ImageData`` from a raw image dict.

        COCO's ``id`` and ``license`` keys are renamed to ``image_id`` and
        ``license_id``; all other keys pass through unchanged.
        """
        renamed = {
            "image_id": json_dict["id"],
            "license_id": json_dict["license"],
        }
        passthrough = {
            key: json_dict[key]
            for key in (
                "file_name",
                "coco_url",
                "height",
                "width",
                "date_captured",
                "flickr_url",
            )
        }
        return cls(**renamed, **passthrough)

    @property
    def shape(self) -> Tuple[int, int]:
        """Image shape as ``(height, width)``, matching numpy mask shapes."""
        return (self.height, self.width)
@dataclass
class CategoryData(object):
    """One entry of the ``categories`` list in a COCO annotation file."""

    category_id: int
    name: str
    supercategory: str

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "CategoryData":
        """Build a ``CategoryData`` from a raw category dict.

        COCO names the identifier field ``"id"``; it is exposed here as
        ``category_id``.
        """
        return cls(
            supercategory=json_dict["supercategory"],
            name=json_dict["name"],
            category_id=json_dict["id"],
        )
@dataclass
class AnnotationData(object):
    """Base class for every COCO annotation record: each annotation carries
    its own id and the id of the image it belongs to."""
    annotation_id: AnnotationId
    image_id: ImageId
@dataclass
class CaptionsAnnotationData(AnnotationData):
    """A single caption attached to one image."""

    caption: str

    @classmethod
    def from_dict(cls, json_dict: JsonDict) -> "CaptionsAnnotationData":
        """Parse one entry of ``annotations`` from a captions JSON file."""
        return cls(
            caption=json_dict["caption"],
            image_id=json_dict["image_id"],
            annotation_id=json_dict["id"],
        )
class UncompressedRLE(TypedDict):
    """Run-length encoding as stored in COCO JSON for crowd regions:
    ``counts`` is a plain list of run lengths over the flattened mask."""
    counts: List[int]
    size: Tuple[int, int]  # (height, width) — see compress_rle's h/w usage
class CompressedRLE(TypedDict):
    """pycocotools' compressed RLE: ``counts`` is an opaque byte string."""
    counts: bytes
    size: Tuple[int, int]  # (height, width)
@dataclass
class InstancesAnnotationData(AnnotationData):
    """An object-instance annotation: segmentation, area, bbox and category.

    Depending on ``decode_rle`` at load time, ``segmentation`` holds either
    a decoded mask array (values 0/255) or a pycocotools compressed RLE dict.
    """
    segmentation: Union[np.ndarray, CompressedRLE]
    area: float
    iscrowd: bool
    bbox: Tuple[float, float, float, float]
    category_id: int
    @classmethod
    def compress_rle(
        cls,
        segmentation: Union[List[List[float]], UncompressedRLE],
        iscrowd: bool,
        height: int,
        width: int,
    ) -> CompressedRLE:
        """Convert a raw COCO segmentation into a single compressed RLE.

        Crowd annotations (iscrowd=1) store one uncompressed RLE, which
        ``frPyObjects`` compresses directly; non-crowd annotations store
        polygon lists, which yield one RLE per polygon and are merged into
        a single mask.
        """
        if iscrowd:
            rle = cocomask.frPyObjects(segmentation, h=height, w=width)
        else:
            rles = cocomask.frPyObjects(segmentation, h=height, w=width)
            rle = cocomask.merge(rles)
        return rle  # type: ignore
    @classmethod
    def rle_segmentation_to_binary_mask(
        cls, segmentation, iscrowd: bool, height: int, width: int
    ) -> np.ndarray:
        """Decode a raw segmentation into a (height, width) 0/1 mask."""
        rle = cls.compress_rle(
            segmentation=segmentation, iscrowd=iscrowd, height=height, width=width
        )
        return cocomask.decode(rle)  # type: ignore
    @classmethod
    def rle_segmentation_to_mask(
        cls,
        segmentation: Union[List[List[float]], UncompressedRLE],
        iscrowd: bool,
        height: int,
        width: int,
    ) -> np.ndarray:
        """Decode a raw segmentation into a 0/255 mask (image-like values)."""
        binary_mask = cls.rle_segmentation_to_binary_mask(
            segmentation=segmentation, iscrowd=iscrowd, height=height, width=width
        )
        # Scale {0, 1} to {0, 255} so the mask can be stored as an image.
        return binary_mask * 255
    @classmethod
    def from_dict(
        cls,
        json_dict: JsonDict,
        images: Dict[ImageId, ImageData],
        decode_rle: bool,
    ) -> "InstancesAnnotationData":
        """Parse one entry of ``annotations`` from an instances JSON file.

        ``images`` supplies the (height, width) needed to rasterize the
        segmentation; ``decode_rle`` selects decoded-mask vs. compressed-RLE
        output for the ``segmentation`` field.
        """
        segmentation = json_dict["segmentation"]
        image_id = json_dict["image_id"]
        image_data = images[image_id]
        iscrowd = bool(json_dict["iscrowd"])
        if decode_rle:
            segmentation_mask = cls.rle_segmentation_to_mask(
                segmentation=segmentation,
                iscrowd=iscrowd,
                height=image_data.height,
                width=image_data.width,
            )
            # The decoded mask must match the image's (height, width).
            assert segmentation_mask.shape == image_data.shape
        else:
            segmentation_mask = cls.compress_rle(
                segmentation=segmentation,
                iscrowd=iscrowd,
                height=image_data.height,
                width=image_data.width,
            )
        return cls(
            #
            # for AnnotationData
            #
            annotation_id=json_dict["id"],
            image_id=image_id,
            #
            # for InstancesAnnotationData
            #
            segmentation=segmentation_mask,
            area=json_dict["area"],
            iscrowd=iscrowd,
            bbox=json_dict["bbox"],
            category_id=json_dict["category_id"],
        )
@dataclass
class PersonKeypoint(object):
    """A single body keypoint: pixel coordinates, the raw COCO visibility
    flag ``v`` and its readable ``state`` (see KEYPOINT_STATE)."""
    x: int
    y: int
    v: int
    state: str
@dataclass
class PersonKeypointsAnnotationData(InstancesAnnotationData):
    """An instance annotation extended with person body keypoints."""
    num_keypoints: int
    keypoints: List[PersonKeypoint]
    @classmethod
    def v_keypoint_to_state(cls, keypoint_v: int) -> str:
        """Map the COCO visibility flag ``v`` to a readable state string."""
        return KEYPOINT_STATE[keypoint_v]
    @classmethod
    def get_person_keypoints(
        cls, flatten_keypoints: List[int], num_keypoints: int
    ) -> List[PersonKeypoint]:
        """Unflatten COCO's ``[x1, y1, v1, x2, y2, v2, ...]`` keypoint list.

        ``num_keypoints`` is cross-checked against the number of parsed
        keypoints whose state is not "unknown" (i.e. ``v != 0``).
        """
        # Every third element, offset by 0/1/2, gives the x/y/v streams.
        keypoints_x = flatten_keypoints[0::3]
        keypoints_y = flatten_keypoints[1::3]
        keypoints_v = flatten_keypoints[2::3]
        assert len(keypoints_x) == len(keypoints_y) == len(keypoints_v)
        keypoints = [
            PersonKeypoint(x=x, y=y, v=v, state=cls.v_keypoint_to_state(v))
            for x, y, v in zip(keypoints_x, keypoints_y, keypoints_v)
        ]
        assert len([kp for kp in keypoints if kp.state != "unknown"]) == num_keypoints
        return keypoints
    @classmethod
    def from_dict(
        cls,
        json_dict: JsonDict,
        images: Dict[ImageId, ImageData],
        decode_rle: bool,
    ) -> "PersonKeypointsAnnotationData":
        """Parse one entry of ``annotations`` from a person_keypoints JSON.

        NOTE(review): the segmentation handling below duplicates
        InstancesAnnotationData.from_dict verbatim — consider factoring the
        shared logic into a helper on the parent class.
        """
        segmentation = json_dict["segmentation"]
        image_id = json_dict["image_id"]
        image_data = images[image_id]
        iscrowd = bool(json_dict["iscrowd"])
        if decode_rle:
            segmentation_mask = cls.rle_segmentation_to_mask(
                segmentation=segmentation,
                iscrowd=iscrowd,
                height=image_data.height,
                width=image_data.width,
            )
            assert segmentation_mask.shape == image_data.shape
        else:
            segmentation_mask = cls.compress_rle(
                segmentation=segmentation,
                iscrowd=iscrowd,
                height=image_data.height,
                width=image_data.width,
            )
        flatten_keypoints = json_dict["keypoints"]
        num_keypoints = json_dict["num_keypoints"]
        keypoints = cls.get_person_keypoints(flatten_keypoints, num_keypoints)
        return cls(
            #
            # for AnnotationData
            #
            annotation_id=json_dict["id"],
            image_id=image_id,
            #
            # for InstancesAnnotationData
            #
            segmentation=segmentation_mask,
            area=json_dict["area"],
            iscrowd=iscrowd,
            bbox=json_dict["bbox"],
            category_id=json_dict["category_id"],
            #
            # PersonKeypointsAnnotationData
            #
            num_keypoints=num_keypoints,
            keypoints=keypoints,
        )
class LicenseDict(TypedDict):
    """License information as emitted inside each example."""
    license_id: LicenseId
    name: str
    url: str
class BaseExample(TypedDict):
    """Fields shared by every emitted example: image metadata plus the
    loaded PIL image and its license."""
    image_id: ImageId
    image: PilImage
    file_name: str
    coco_url: str
    height: int
    width: int
    date_captured: str
    flickr_url: str
    license_id: LicenseId
    license: LicenseDict
class CaptionAnnotationDict(TypedDict):
    """One caption annotation as emitted inside an example."""
    annotation_id: AnnotationId
    caption: str
class CaptionExample(BaseExample):
    """A captions-task example: base image fields plus caption annotations."""
    annotations: List[CaptionAnnotationDict]
def generate_captions_examples(
    image_dir: str,
    images: Dict[ImageId, ImageData],
    annotations: Dict[ImageId, List[CaptionsAnnotationData]],
    licenses: Dict[LicenseId, LicenseData],
) -> Iterator[Tuple[int, CaptionExample]]:
    """Yield ``(index, example)`` pairs for the captions task.

    Every image is expected to carry at least one caption annotation.
    """
    for idx, (image_id, image_data) in enumerate(images.items()):
        image_anns = annotations[image_id]
        assert len(image_anns) > 0
        example = asdict(image_data)
        example["image"] = _load_image(
            image_path=os.path.join(image_dir, image_data.file_name),
        )
        example["license"] = asdict(licenses[image_data.license_id])
        example["annotations"] = [asdict(ann) for ann in image_anns]
        yield idx, example  # type: ignore
class CategoryDict(TypedDict):
    """Category info as emitted inside each instance annotation."""
    category_id: CategoryId
    name: str
    supercategory: str
class InstanceAnnotationDict(TypedDict):
    """One instance annotation as emitted inside an example."""
    annotation_id: AnnotationId
    area: float
    bbox: Bbox
    image_id: ImageId
    category_id: CategoryId
    category: CategoryDict
    iscrowd: bool
    segmentation: np.ndarray
class InstanceExample(BaseExample):
    """An instances-task example: base image fields plus instance
    annotations."""
    annotations: List[InstanceAnnotationDict]
def generate_instances_examples(
    image_dir: str,
    images: Dict[ImageId, ImageData],
    annotations: Dict[ImageId, List[InstancesAnnotationData]],
    licenses: Dict[LicenseId, LicenseData],
    categories: Dict[CategoryId, CategoryData],
) -> Iterator[Tuple[int, InstanceExample]]:
    """Yield ``(index, example)`` pairs for the instances task.

    Images that carry no instance annotations are skipped with a warning.
    """
    for idx, (image_id, image_data) in enumerate(images.items()):
        image_anns = annotations[image_id]
        if not image_anns:
            logger.warning(f"No annotation found for image id: {image_id}.")
            continue
        example = asdict(image_data)
        example["image"] = _load_image(
            image_path=os.path.join(image_dir, image_data.file_name),
        )
        example["license"] = asdict(licenses[image_data.license_id])
        anns = []
        for ann in image_anns:
            ann_dict = asdict(ann)
            ann_dict["category"] = asdict(categories[ann.category_id])
            anns.append(ann_dict)
        example["annotations"] = anns
        yield idx, example  # type: ignore
class KeypointDict(TypedDict):
    """One keypoint as emitted inside an example (``state`` derives from
    ``v`` via KEYPOINT_STATE)."""
    x: int
    y: int
    v: int
    state: str
class PersonKeypointAnnotationDict(InstanceAnnotationDict):
    """An instance annotation extended with person keypoints."""
    num_keypoints: int
    keypoints: List[KeypointDict]
class PersonKeypointExample(BaseExample):
    """A person_keypoints-task example."""
    annotations: List[PersonKeypointAnnotationDict]
def generate_person_keypoints_examples(
    image_dir: str,
    images: Dict[ImageId, ImageData],
    annotations: Dict[ImageId, List[PersonKeypointsAnnotationData]],
    licenses: Dict[LicenseId, LicenseData],
    categories: Dict[CategoryId, CategoryData],
) -> Iterator[Tuple[int, PersonKeypointExample]]:
    """Yield ``(index, example)`` pairs for the person_keypoints task.

    Images without persons carry no keypoint annotations and are skipped
    silently.
    """
    for idx, (image_id, image_data) in enumerate(images.items()):
        image_anns = annotations[image_id]
        if not image_anns:
            # No persons in this image -> no keypoint annotations assigned.
            continue
        example = asdict(image_data)
        example["image"] = _load_image(
            image_path=os.path.join(image_dir, image_data.file_name),
        )
        example["license"] = asdict(licenses[image_data.license_id])
        anns = []
        for ann in image_anns:
            ann_dict = asdict(ann)
            ann_dict["category"] = asdict(categories[ann.category_id])
            anns.append(ann_dict)
        example["annotations"] = anns
        yield idx, example  # type: ignore
class MsCocoConfig(ds.BuilderConfig):
    """BuilderConfig for MSCOCO: a (year, task) pair plus an RLE flag.

    ``coco_task`` may be a single task name or a sequence of task names;
    the config name is derived from the year and the task(s).
    """

    YEARS: Tuple[int, ...] = (
        2014,
        2017,
    )
    TASKS: Tuple[str, ...] = (
        "captions",
        "instances",
        "person_keypoints",
    )

    def __init__(
        self,
        year: int,
        coco_task: Union[str, Sequence[str]],
        version: Optional[Union[ds.Version, str]],
        decode_rle: bool = False,
        data_dir: Optional[str] = None,
        data_files: Optional[DataFilesDict] = None,
        description: Optional[str] = None,
    ) -> None:
        """Validate year/task and register the derived config name.

        Args:
            year: Dataset release year; must be one of ``YEARS``.
            coco_task: Task name, or sequence of task names, from ``TASKS``.
            version: Dataset version string or ``ds.Version``.
            decode_rle: If True, segmentations are decoded to masks instead
                of being kept as compressed RLE.
            data_dir: Forwarded to ``ds.BuilderConfig``.
            data_files: Forwarded to ``ds.BuilderConfig``.
            description: Forwarded to ``ds.BuilderConfig``.
        """
        # Validate before deriving the config name so invalid arguments
        # fail fast with a clear message.
        self._check_year(year)
        self._check_task(coco_task)
        super().__init__(
            name=self.config_name(year=year, task=coco_task),
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self._year = year
        self._task = coco_task
        self.decode_rle = decode_rle

    def _check_year(self, year: int) -> None:
        """Assert that ``year`` is a supported release year."""
        assert year in self.YEARS, year

    def _check_task(self, task: Union[str, Sequence[str]]) -> None:
        """Assert that ``task`` (or every element of it) is a known task."""
        if isinstance(task, str):
            assert task in self.TASKS, task
        elif isinstance(task, (list, tuple)):
            for t in task:
                # Bug fix: this previously asserted `self.TASKS` (a non-empty
                # class constant, hence always truthy), so unknown task names
                # in a sequence slipped through unchecked.
                assert t in self.TASKS, task
        else:
            raise ValueError(f"Invalid task: {task}")

    @property
    def year(self) -> int:
        """The configured release year."""
        return self._year

    @property
    def task(self) -> str:
        """The task name; multiple tasks are sorted and hyphen-joined."""
        if isinstance(self._task, str):
            return self._task
        elif isinstance(self._task, (list, tuple)):
            return "-".join(sorted(self._task))
        else:
            raise ValueError(f"Invalid task: {self._task}")

    @classmethod
    def config_name(cls, year: int, task: Union[str, Sequence[str]]) -> str:
        """Derive the config name, e.g. ``"2017-instances"``.

        NOTE(review): unlike the ``task`` property, sequence tasks are
        joined in the given order without sorting — confirm whether this
        asymmetry is intentional.
        """
        if isinstance(task, str):
            return f"{year}-{task}"
        elif isinstance(task, (list, tuple)):
            task = "-".join(task)
            return f"{year}-{task}"
        else:
            raise ValueError(f"Invalid task: {task}")
def _load_image(image_path: str) -> PilImage:
    """Open an image file with PIL and return the image object.

    Note: ``Image.open`` defers reading pixel data until it is needed.
    """
    return Image.open(image_path)
def _load_annotation_json(ann_file_path: str) -> JsonDict:
    """Read a COCO annotation JSON file and return it as a dict."""
    logger.info(f"Load annotation json from {ann_file_path}")
    with open(ann_file_path, "r") as rf:
        return json.load(rf)
def _load_licenses_data(license_dicts: List[JsonDict]) -> Dict[LicenseId, LicenseData]:
    """Parse the raw ``licenses`` list and index it by license id."""
    parsed = (LicenseData.from_dict(raw) for raw in license_dicts)
    return {data.license_id: data for data in parsed}
def _load_images_data(
    image_dicts: List[JsonDict],
    tqdm_desc: str = "Load images",
) -> Dict[ImageId, ImageData]:
    """Parse the raw ``images`` list and index it by image id."""
    parsed = (ImageData.from_dict(raw) for raw in tqdm(image_dicts, desc=tqdm_desc))
    return {data.image_id: data for data in parsed}
def _load_categories_data(
    category_dicts: List[JsonDict],
    tqdm_desc: str = "Load categories",
) -> Dict[CategoryId, CategoryData]:
    """Parse the raw ``categories`` list and index it by category id."""
    parsed = (CategoryData.from_dict(raw) for raw in tqdm(category_dicts, desc=tqdm_desc))
    return {data.category_id: data for data in parsed}
def _load_captions_data(
    ann_dicts: List[JsonDict],
    tqdm_desc: str = "Load captions data",
) -> Dict[ImageId, List[CaptionsAnnotationData]]:
    """Group caption annotations by the image they belong to."""
    grouped: Dict[ImageId, List[CaptionsAnnotationData]] = defaultdict(list)
    for raw in tqdm(ann_dicts, desc=tqdm_desc):
        data = CaptionsAnnotationData.from_dict(raw)
        grouped[data.image_id].append(data)
    return grouped
def _load_instances_data(
    ann_dicts: List[JsonDict],
    images: Dict[ImageId, ImageData],
    decode_rle: bool,
    tqdm_desc: str = "Load instances data",
) -> Dict[ImageId, List[InstancesAnnotationData]]:
    """Group instance annotations by image id (processed in image-id order)."""
    grouped: Dict[ImageId, List[InstancesAnnotationData]] = defaultdict(list)
    ordered = sorted(ann_dicts, key=lambda raw: raw["image_id"])
    for raw in tqdm(ordered, desc=tqdm_desc):
        data = InstancesAnnotationData.from_dict(
            raw, images=images, decode_rle=decode_rle
        )
        grouped[data.image_id].append(data)
    return grouped
def _load_person_keypoints_data(
    ann_dicts: List[JsonDict],
    images: Dict[ImageId, ImageData],
    decode_rle: bool,
    tqdm_desc: str = "Load person keypoints data",
) -> Dict[ImageId, List[PersonKeypointsAnnotationData]]:
    """Group keypoint annotations by image id (processed in image-id order)."""
    grouped: Dict[ImageId, List[PersonKeypointsAnnotationData]] = defaultdict(list)
    ordered = sorted(ann_dicts, key=lambda raw: raw["image_id"])
    for raw in tqdm(ordered, desc=tqdm_desc):
        data = PersonKeypointsAnnotationData.from_dict(
            raw, images=images, decode_rle=decode_rle
        )
        grouped[data.image_id].append(data)
    return grouped
def get_features_base_dict():
    """Feature schema shared by all tasks: image metadata plus license."""
    return {
        "image_id": ds.Value("int64"),
        "image": ds.Image(),
        "file_name": ds.Value("string"),
        "coco_url": ds.Value("string"),
        "height": ds.Value("int32"),
        "width": ds.Value("int32"),
        "date_captured": ds.Value("string"),
        "flickr_url": ds.Value("string"),
        "license_id": ds.Value("int32"),
        "license": {
            "url": ds.Value("string"),
            # NOTE(review): declared int8 here but int32 for the top-level
            # license_id above, although both hold the same id — confirm
            # whether the narrower type is intentional.
            "license_id": ds.Value("int8"),
            "name": ds.Value("string"),
        },
    }
def get_features_instance_dict(decode_rle: bool):
    """Feature schema of one instance annotation.

    ``segmentation`` is an image feature when RLE is decoded to a mask,
    otherwise a compressed-RLE struct of run counts and mask size.
    """
    if decode_rle:
        segmentation_feature = ds.Image()
    else:
        segmentation_feature = {
            "counts": ds.Sequence(ds.Value("int64")),
            "size": ds.Sequence(ds.Value("int32")),
        }
    return {
        "annotation_id": ds.Value("int64"),
        "image_id": ds.Value("int64"),
        "segmentation": segmentation_feature,
        "area": ds.Value("float32"),
        "iscrowd": ds.Value("bool"),
        "bbox": ds.Sequence(ds.Value("float32"), length=4),
        "category_id": ds.Value("int32"),
        "category": {
            "category_id": ds.Value("int32"),
            "name": ds.Value("string"),
            "supercategory": ds.Value("string"),
        },
    }
def get_features_captions() -> ds.Features:
    """Full feature schema for the captions task."""
    caption_annotation = {
        "annotation_id": ds.Value("int64"),
        "image_id": ds.Value("int64"),
        "caption": ds.Value("string"),
    }
    features_dict = get_features_base_dict()
    features_dict["annotations"] = ds.Sequence(caption_annotation)
    return ds.Features(features_dict)
def get_features_instances(decode_rle: bool) -> ds.Features:
    """Full feature schema for the instances task."""
    instance = get_features_instance_dict(decode_rle=decode_rle)
    features_dict = get_features_base_dict()
    features_dict["annotations"] = ds.Sequence(instance)
    return ds.Features(features_dict)
def get_features_person_keypoints(decode_rle: bool) -> ds.Features:
    """Full feature schema for the person_keypoints task."""
    keypoint = {
        "state": ds.Value("string"),
        "x": ds.Value("int32"),
        "y": ds.Value("int32"),
        "v": ds.Value("int32"),
    }
    instance = get_features_instance_dict(decode_rle=decode_rle)
    instance["keypoints"] = ds.Sequence(keypoint)
    instance["num_keypoints"] = ds.Value("int32")
    features_dict = get_features_base_dict()
    features_dict["annotations"] = ds.Sequence(instance)
    return ds.Features(features_dict)
def dataset_configs(year: int, version: ds.Version) -> List[MsCocoConfig]:
    """All builder configs registered for one dataset year."""
    task_specs: List[Union[str, Tuple[str, ...]]] = [
        "captions",
        "instances",
        "person_keypoints",
        ("captions", "instances"),
        ("captions", "person_keypoints"),
    ]
    return [
        MsCocoConfig(year=year, coco_task=spec, version=version)
        for spec in task_specs
    ]
def configs_2014(version: ds.Version) -> List[MsCocoConfig]:
    """All builder configs for the 2014 release."""
    return dataset_configs(year=2014, version=version)
def configs_2017(version: ds.Version) -> List[MsCocoConfig]:
    """All builder configs for the 2017 release."""
    return dataset_configs(year=2017, version=version)
class MsCocoDataset(ds.GeneratorBasedBuilder):
    """HuggingFace ``datasets`` builder for MSCOCO (2014 and 2017)."""
    VERSION = ds.Version("1.0.0")
    BUILDER_CONFIG_CLASS = MsCocoConfig
    BUILDER_CONFIGS = configs_2014(version=VERSION) + configs_2017(version=VERSION)
    @property
    def year(self) -> int:
        """Release year taken from the active config."""
        config: MsCocoConfig = self.config  # type: ignore
        return config.year
    @property
    def task(self) -> str:
        """Task name taken from the active config (hyphen-joined when the
        config was built from several tasks)."""
        config: MsCocoConfig = self.config  # type: ignore
        return config.task
    def _info(self) -> ds.DatasetInfo:
        """Build the DatasetInfo with the feature schema of the task."""
        if self.task == "captions":
            features = get_features_captions()
        elif self.task == "instances":
            features = get_features_instances(
                decode_rle=self.config.decode_rle,  # type: ignore
            )
        elif self.task == "person_keypoints":
            features = get_features_person_keypoints(
                decode_rle=self.config.decode_rle,  # type: ignore
            )
        else:
            # NOTE(review): multi-task configs (e.g. "captions-instances")
            # are registered in BUILDER_CONFIGS but not handled here and
            # will raise — confirm whether that is intended.
            raise ValueError(f"Invalid task: {self.task}")
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )
    def _split_generators(self, dl_manager: ds.DownloadManager):
        """Download/extract the year's archives and define the splits.

        Only train and validation are produced; the test split is disabled
        (see the commented-out generator below).
        """
        file_paths = dl_manager.download_and_extract(_URLS[f"{self.year}"])
        imgs = file_paths["images"]  # type: ignore
        anns = file_paths["annotations"]  # type: ignore
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,  # type: ignore
                gen_kwargs={
                    "base_image_dir": imgs["train"],
                    "base_annotation_dir": anns["train_validation"],
                    "split": "train",
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,  # type: ignore
                gen_kwargs={
                    "base_image_dir": imgs["validation"],
                    "base_annotation_dir": anns["train_validation"],
                    "split": "val",
                },
            ),
            # ds.SplitGenerator(
            #     name=ds.Split.TEST,  # type: ignore
            #     gen_kwargs={
            #         "base_image_dir": imgs["test"],
            #         "test_image_info_path": anns["test_image_info"],
            #         "split": "test",
            #     },
            # ),
        ]
    def _generate_train_val_examples(
        self, split: str, base_image_dir: str, base_annotation_dir: str
    ):
        """Yield examples for a train/val split of the active task.

        Loads the split's annotation JSON, parses licenses/images (and
        categories where present), then dispatches to the task-specific
        example generator.
        """
        image_dir = os.path.join(base_image_dir, f"{split}{self.year}")
        ann_dir = os.path.join(base_annotation_dir, "annotations")
        ann_file_path = os.path.join(ann_dir, f"{self.task}_{split}{self.year}.json")
        ann_json = _load_annotation_json(ann_file_path=ann_file_path)
        # info = AnnotationInfo.from_dict(ann_json["info"])
        licenses = _load_licenses_data(license_dicts=ann_json["licenses"])
        images = _load_images_data(image_dicts=ann_json["images"])
        # "categories" is absent from captions files, hence the None guard.
        category_dicts = ann_json.get("categories")
        categories = (
            _load_categories_data(category_dicts=category_dicts)
            if category_dicts is not None
            else None
        )
        config: MsCocoConfig = self.config  # type: ignore
        if config.task == "captions":
            yield from generate_captions_examples(
                annotations=_load_captions_data(
                    ann_dicts=ann_json["annotations"],
                ),
                image_dir=image_dir,
                images=images,
                licenses=licenses,
            )
        elif config.task == "instances":
            assert categories is not None
            yield from generate_instances_examples(
                annotations=_load_instances_data(
                    images=images,
                    ann_dicts=ann_json["annotations"],
                    decode_rle=self.config.decode_rle,  # type: ignore
                ),
                categories=categories,
                image_dir=image_dir,
                images=images,
                licenses=licenses,
            )
        elif config.task == "person_keypoints":
            assert categories is not None
            yield from generate_person_keypoints_examples(
                annotations=_load_person_keypoints_data(
                    images=images,
                    ann_dicts=ann_json["annotations"],
                    decode_rle=self.config.decode_rle,  # type: ignore
                ),
                categories=categories,
                image_dir=image_dir,
                images=images,
                licenses=licenses,
            )
        else:
            raise ValueError(f"Invalid task: {config.task}")
    def _generate_test_examples(self, test_image_info_path: str):
        """Test-split generation is not implemented (split is disabled)."""
        raise NotImplementedError
    def _generate_examples(
        self,
        split: MscocoSplits,
        base_image_dir: Optional[str] = None,
        base_annotation_dir: Optional[str] = None,
        test_image_info_path: Optional[str] = None,
    ):
        """Dispatch to the test or train/val generator based on kwargs."""
        if split == "test" and test_image_info_path is not None:
            yield from self._generate_test_examples(
                test_image_info_path=test_image_info_path
            )
        elif (
            split in get_args(MscocoSplits)
            and base_image_dir is not None
            and base_annotation_dir is not None
        ):
            yield from self._generate_train_val_examples(
                split=split,
                base_image_dir=base_image_dir,
                base_annotation_dir=base_annotation_dir,
            )
        else:
            raise ValueError(
                f"Invalid arguments: split = {split}, "
                f"base_image_dir = {base_image_dir}, "
                f"base_annotation_dir = {base_annotation_dir}, "
                f"test_image_info_path = {test_image_info_path}",
            )