holylovenia committed
Commit 0293712
Parent(s): 826231c
Upload m3ls.py with huggingface_hub

m3ls.py ADDED
@@ -0,0 +1,403 @@
"""
SEACrowd Data Loader for M3LS.
"""
import json
import os
from collections.abc import Iterable
from copy import deepcopy
from typing import Dict, Generator, List, Tuple, Union

try:
    import PIL
except (ImportError, ModuleNotFoundError):
    print("Please install `PIL` to load image-based data from the M3LS dataloader.")
else:
    PIL.__version__  # to avoid being marked by formatter

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks

_CITATION = r"""
@inproceedings{verma-etal-2023-large,
    title = "Large Scale Multi-Lingual Multi-Modal Summarization Dataset",
    author = "Verma, Yash and
      Jangra, Anubhav and
      Verma, Raghvendra and
      Saha, Sriparna",
    editor = "Vlachos, Andreas and
      Augenstein, Isabelle",
    booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics",
    month = may,
    year = "2023",
    address = "Dubrovnik, Croatia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.eacl-main.263",
    doi = "10.18653/v1/2023.eacl-main.263",
    pages = "3620--3632",
}
"""

logger = datasets.logging.get_logger(__name__)

_LOCAL = False
_LANGUAGES = ["ind"]


_DATASETNAME = "m3ls"
_DESCRIPTION = r"""
The multilingual multimodal summarization dataset (M3LS) consists of over a million instances of document-image pairs,
each accompanied by a professionally annotated multimodal summary.
It is derived from news articles published by the British Broadcasting Corporation (BBC) over a decade and spans 20 languages in total,
of which Indonesian is the only SEA language available in this dataset.
"""

_HOMEPAGE = "https://github.com/anubhav-jangra/M3LS/tree/main"
_LICENSE = Licenses.MIT.value

_URL = "https://drive.google.com/uc?id=1Kznkw7YpRiWpdgH4_SVNwp0uGf3j-5e2"

_SUPPORTED_TASKS = [Tasks.SUMMARIZATION, Tasks.IMAGE_CAPTIONING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

_CONFIG_SUFFIXES_FOR_TASK = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]


class M3LSDataset(datasets.GeneratorBasedBuilder):
    """M3LS dataset of Indonesian Language (from BBC Indonesian)"""

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        *[
            SEACrowdConfig(
                name=f"{_DATASETNAME}_seacrowd_{cfg_suffix}",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"{_DATASETNAME} seacrowd schema for {task.name}",
                schema=f"seacrowd_{cfg_suffix}",
                subset_id=f"{_DATASETNAME}",
            )
            for task, cfg_suffix in zip(_SUPPORTED_TASKS, _CONFIG_SUFFIXES_FOR_TASK)
        ],
    ]
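    # With the defaults above, three config names are exposed (assuming
    # TASK_TO_SCHEMA maps SUMMARIZATION -> "T2T" and IMAGE_CAPTIONING -> "IMTEXT",
    # as the schema checks below expect):
    # "m3ls_source", "m3ls_seacrowd_t2t", and "m3ls_seacrowd_imtext".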

    def _info(self) -> datasets.DatasetInfo:
        _config_schema_name = self.config.schema
        logger.info(f"Received schema name: {self.config.schema}")

        if _config_schema_name == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "keyword": datasets.Sequence(datasets.Value("string")),
                    "related": datasets.Sequence(datasets.Value("string")),
                    "section_headers": datasets.Sequence(datasets.Value("string")),
                    "paragraphs": datasets.Sequence(datasets.Value("string")),
                    "images": datasets.Sequence(datasets.Image()),
                    "captions": datasets.Sequence(datasets.Value("string")),
                }
            )

        # text-to-text schema
        elif _config_schema_name == "seacrowd_t2t":
            features = schemas.text2text_features

        elif _config_schema_name == "seacrowd_imtext":
            features = schemas.image_text_features()

        else:
            raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        try:
            import gdown
        except ImportError:
            raise ImportError("Please install `gdown` to enable downloading data from Google Drive.")

        # download from Google Drive
        output_dir = os.path.join(os.getcwd(), "data", "m3ls")
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        output_file = os.path.join(output_dir, "m3ls.zip")
        if not os.path.exists(output_file):
            gdown.download(_URL, str(output_file), fuzzy=True)
        else:
            logger.info(f"File already downloaded: {str(output_file)}")

        local_path = os.path.join(dl_manager.extract(output_file), "bbcindonesia")

        # there are two folders containing json files, namely "processed" and "articles";
        # both hold article info with url, text, and the accompanying scraped resources (i.e. images & captions, related articles)

        # "processed" contains only 244 records, 156 of which have no title info,
        # whereas "articles" contains 56108 records (the full count reported in the paper), all with title info

        # the two folders share no links and no further documentation is provided, hence we only take "articles" since it matches the paper
        # the original paper mentions an 80:10:10 split, but no such split index is available in the extracted folder
        article_data_dir = os.path.join(local_path, "articles")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "article_data_dir": article_data_dir,
                    "image_folder": os.path.join(local_path, "imagefolder"),
                },
            )
        ]

    def _generate_examples(self, article_data_dir: str, image_folder: str) -> Generator[Tuple[int, Dict], None, None]:
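        # Record shapes yielded per schema (illustrative; values are hypothetical):
        #   source          -> full article metadata plus section_headers, paragraphs, images, captions
        #   seacrowd_t2t    -> {"text_1": <joined paragraphs>, "text_2": <summary>, ...}
        #   seacrowd_imtext -> one record per kept (image, caption) pair, with the article url as context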
        _config_schema_name = self.config.schema
        all_image_filename = os.listdir(image_folder)

        idx = 1
        im_data_idx = 1
        for filename in os.listdir(article_data_dir):
            root_data, content_data = self.__json_read_and_process(os.path.join(article_data_dir, filename))

            # for images, around 6.7% are missing (15625 out of 230163)
            if _config_schema_name == "source":
                content_data = self.__m3ls_content_data_reconstructor_and_validator(content_data, mode="all")
                image_path, captions = self.__m3ls_filter_image_and_captions_data(content_data["image_paths"], content_data["captions"], image_folder, all_image_filename)

                yield idx, {
                    "id": idx,
                    "date": root_data["date"],
                    "url": root_data["url"],
                    "title": root_data["title"],
                    "summary": root_data["summary"],
                    "keyword": root_data["keyword"],
                    "related": root_data["related"],
                    "section_headers": content_data["section_headers"],
                    "paragraphs": content_data["paragraphs"],
                    "images": image_path,
                    "captions": captions,
                }

            elif _config_schema_name == "seacrowd_t2t":
                content_data = self.__m3ls_content_data_reconstructor_and_validator(content_data, mode="text")
                yield idx, {
                    "id": idx,
                    "text_1": "\n".join(content_data["paragraphs"]),
                    "text_2": root_data["summary"],
                    "text_1_name": "texts",
                    "text_2_name": "summary",
                }

            elif _config_schema_name == "seacrowd_imtext":
                content_data = self.__m3ls_content_data_reconstructor_and_validator(content_data, mode="image")
                image_path, captions = self.__m3ls_filter_image_and_captions_data(content_data["image_paths"], content_data["captions"], image_folder, all_image_filename, both_exists=True)

                if image_path == []:
                    continue

                for path_idx in range(len(image_path)):
                    yield im_data_idx, {
                        "id": im_data_idx,
                        "image_paths": [image_path[path_idx]],
                        "texts": captions[path_idx],
                        "metadata": {
                            "context": root_data["url"],
                            "labels": None,
                        },
                    }
                    im_data_idx += 1

            else:
                raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")

            idx += 1

    @staticmethod
    def __check_only_1level_iterables(iter_obj):
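        # Illustration (hypothetical inputs): ["a", "b"] -> True (strings only),
        # while [["a"], "b"] -> False (a nested list is a non-str Iterable).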
        return all([not isinstance(data, Iterable) or isinstance(data, str) for data in iter_obj])

    @classmethod
    def __json_read_and_process(cls, path: str) -> Dict:
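        # Expected raw JSON layout (illustrative; inferred from the checks below):
        # {
        #     "url": "...", "title": "...", "summary": "...",        # compulsory
        #     "date": "...", "keyword": [...], "related": [...],     # optional
        #     "0": {"subheading": "...", "para": [...], "images": [["img.jpg", "caption"], ...]},
        #     ...                                                    # int-like string keys hold section-wise content
        # }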

        # check (for compulsory keys) and reconstruct (for optional keys) the json data
        def base_data_reconstructor(json_data: dict, return_split: bool = True) -> Union[Dict, Tuple[Dict, Dict]]:

            # detect content-based dict keys (denoted by int-like keys stored as strings)
            def parse_or_check_int(val: Union[int, str, float], is_parse: bool = True):
                try:
                    int(val)
                except (ValueError, TypeError):
                    return val if is_parse else False
                else:
                    return int(val) if is_parse else True

            compulsory_keys = ["summary", "url", "title"]
            optional_keys = ["date", "keyword", "related"]
            optional_key_mapper = list(zip(optional_keys, ["Not available", [], []]))

            if any(key not in json_data.keys() for key in compulsory_keys):
                raise KeyError(f"Missing keys of {list(set(compulsory_keys).difference(json_data.keys()))}")

            for key, default_val in optional_key_mapper:
                _existing_val = json_data.get(key)
                new_data = {key: json_data.get(key) if _existing_val is not None else default_val}
                json_data.update(new_data)

            all_content_keys = [key for key in json_data.keys() if parse_or_check_int(key, is_parse=False)]

            if sorted(compulsory_keys + optional_keys + all_content_keys) != sorted(json_data.keys()):
                raise KeyError("Some keys are unexpectedly missing or present!")

            content_data = {key: json_data[key] for key in all_content_keys}

            if not return_split:
                json_data.update(content_data)
                return json_data
            else:
                root_data = {key: val for key, val in json_data.items() if key not in all_content_keys}
                return root_data, content_data

        def non_content_data_validator(json_data: dict):
            non_content_dtypes = [("url", str), ("title", str), ("date", str), ("summary", str), ("keyword", list), ("related", list)]

            for key, _type in non_content_dtypes:
                if not isinstance(json_data[key], _type):
                    raise TypeError(f"The dict has key {key} that doesn't match with expected type {_type}!")

                # assert only 1-level iterables for list types
                if _type == list:
                    if not cls.__check_only_1level_iterables(json_data[key]):
                        raise ValueError(f"Found iterables in {key} for val {json_data[key]}")

        with open(path, "r") as f:
            json_input = json.load(f)

        base_data, content_data = base_data_reconstructor(json_input)

        non_content_data_validator(base_data)

        return base_data, content_data

    @classmethod
    def __m3ls_content_data_reconstructor_and_validator(cls, json_content_data: Dict, mode: str = "all") -> Dict:
        # the `mode` value is shared by all subfunctions defined under this method
        if mode not in ("all", "image", "text"):
            raise ValueError("Unexpected `mode`! Accepted: 'all', 'image', or 'text'.")

        all_content_ftrs = ("images", "para", "subheading")
        expected_dtypes = (list, list, str)
        default_values = ([["", ""]], [], "")

        _all_ftr_validation_info = {all_content_ftrs[_idx]: {"dtype": expected_dtypes[_idx], "default_val": default_values[_idx]} for _idx in range(len(all_content_ftrs))}

        if mode == "all":
            ftr_idx = list(range(3))
        elif mode == "image":
            ftr_idx = list(range(1))
        elif mode == "text":
            ftr_idx = list(range(1, 3))

        ftr_validation_info = {all_content_ftrs[_idx]: _all_ftr_validation_info[all_content_ftrs[_idx]] for _idx in ftr_idx}

        def content_data_reconstructor(json_data: dict):
            json_data = deepcopy(json_data)

            for key, content_dict in json_data.items():
                for ftr, ftr_info in ftr_validation_info.items():
                    if content_dict.get(ftr) is None:
                        json_data[key][ftr] = ftr_info["default_val"]

            return json_data

        def content_data_validator(content_data: dict):
            for content_dict in content_data.values():
                if not isinstance(content_dict, dict):
                    raise TypeError("Unexpected type found on content data!")

                for ftr_name, ftr_info in ftr_validation_info.items():
                    _type = ftr_info["dtype"]
                    if not isinstance(content_dict[ftr_name], _type):
                        raise TypeError(f"Unexpected type found on content {ftr_name} data! Expected {_type}, got {type(content_dict[ftr_name])}")

                if "para" in ftr_validation_info.keys() and not cls.__check_only_1level_iterables(content_dict["para"]):
                    raise ValueError("Found iterable in the 'paragraph' data!")

                if "images" in ftr_validation_info.keys() and not all([isinstance(image_data, list) for image_data in content_dict["images"]]):
                    raise ValueError("Found non-list in the 'images' data!")

                if "images" in ftr_validation_info.keys() and not all([len(image_data) == 2 for image_data in content_dict["images"]]):
                    raise ValueError("Found non-paired tuples in the 'images' data!")

                if "images" in ftr_validation_info.keys() and not all([cls.__check_only_1level_iterables(image_data) for image_data in content_dict["images"]]):
                    raise ValueError("Found iterable in the 'images' individual data!")

        def m3ls_content_data_post_process(content_data: dict) -> Dict:
            output_json = {}
            for _ftr in ftr_validation_info.keys():
                output_data = []
                for value in content_data.values():
                    output_data.append(value[_ftr])
                output_json[_ftr] = output_data

            # post-process each feature
            if "para" in ftr_validation_info.keys():
                paragraphs = []
                for section_data in output_json.pop("para"):
                    paragraphs.append("".join([val for val in section_data if val.strip() != ""]))
                output_json["paragraphs"] = paragraphs

            if "images" in ftr_validation_info.keys():
                list_image_paths = []
                list_captions = []
                for sectioned_data in output_json.pop("images"):
                    for val in sectioned_data:
                        list_image_paths.append(val[0])
                        list_captions.append("" if val[1] is None else val[1].strip())
                output_json["image_paths"] = list_image_paths
                output_json["captions"] = list_captions

            if "subheading" in ftr_validation_info.keys():
                output_json["section_headers"] = output_json.pop("subheading")

            return output_json

        content_data = content_data_reconstructor(json_content_data)

        content_data_validator(content_data)

        content_data = m3ls_content_data_post_process(content_data)

        return content_data

    @staticmethod
    def __m3ls_filter_image_and_captions_data(image_data: list, captions_data: list, base_image_folder: str, all_images: list, both_exists: bool = False) -> Tuple[List, List]:
        image_path, captions = [], []

        if len(captions_data) != len(image_data):
            raise ValueError("Not a 1-1 mapping of image-captions!")

        for idx, img_path in enumerate(image_data):
            if img_path in all_images:
                if both_exists and captions_data[idx] == "":
                    continue
                image_path.append(os.path.join(base_image_folder, img_path))
                captions.append(captions_data[idx])

        return image_path, captions
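
A minimal usage sketch for this loader (assumptions: the file is saved locally as m3ls.py; `datasets`, `gdown`, and `Pillow` are installed and the `seacrowd` utilities are importable; depending on your `datasets` version, trust_remote_code=True may also be required; config names follow BUILDER_CONFIGS above):

    import datasets

    # Load the source schema; download and extraction are handled by _split_generators.
    ds = datasets.load_dataset("m3ls.py", name="m3ls_source", split="train")

    # Inspect one record: title, summary, and section-wise paragraphs.
    print(ds[0]["title"])
    print(ds[0]["summary"])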