"""YT-Temporal-180M dataset loading script for the Hugging Face `datasets` library."""

import datetime
import json

import datasets

_CITATION = """\
@inproceedings{zellersluhessel2021merlot,
    title={MERLOT: Multimodal Neural Script Knowledge Models},
    author={Zellers, Rowan and Lu, Ximing and Hessel, Jack and Yu, Youngjae and Park, Jae Sung and Cao, Jize and Farhadi, Ali and Choi, Yejin},
    booktitle={Advances in Neural Information Processing Systems 34},
    year={2021}
}
"""

_DESCRIPTION = """\
YT-Temporal-180M, a large and diverse dataset of 6 million videos (spanning 180M extracted frames) covering a wide range of topics.
"""

_URL_BASE = "https://rowanzellers.com/merlot/#data"

# The data is sharded into 100 gzipped JSONL files, numbered 000 through 099.
_DL_URLS = [
    f"https://storage.googleapis.com/merlot/yttemporal180m/yttemporal180m_{i:03d}of100.jsonl.gz"
    for i in range(100)
]


def json_serializer(o):
    """Serialize datetime objects as strings so the metadata survives json.dumps."""
    if isinstance(o, datetime.datetime):
        return str(o)
    raise TypeError(f"Object of type {o.__class__.__name__} is not JSON serializable")


class YTTemporal180MConfig(datasets.BuilderConfig):
    """BuilderConfig for YT-Temporal-180M."""

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)


class YTTemporal180M(datasets.GeneratorBasedBuilder):
    DEFAULT_CONFIG_NAME = "default"
    BUILDER_CONFIGS = [
        YTTemporal180MConfig(name="default", description="Default full yttemporal180m dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "video_url": datasets.Value("string"),
                    "caption": datasets.Value("string"),
                    "timestamp_start": datasets.Value("float32"),
                    "timestamp_stop": datasets.Value("float32"),
                    "meta": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL_BASE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # download_and_extract accepts a list of URLs and returns the local
        # paths of the decompressed shards in the same order.
        jsonl_paths = dl_manager.download_and_extract(_DL_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"jsonl_files": jsonl_paths},
            )
        ]

    def _generate_examples(self, jsonl_files):
        """Yields one example per ~15-second caption segment of each video."""
        idx = 0
        for file in jsonl_files:
            with open(file, encoding="utf-8") as jsonl_file:
                for json_str in jsonl_file:
                    infos = json.loads(json_str)
                    video_id = infos["info"]["display_id"]
                    url = "https://www.youtube.com/watch?v=" + video_id

                    # Divide the video into segments of at most 15 seconds.
                    max_sec_per_segment = 15
                    last_caption_timestamp = infos["subtitles"][-1]["time"]
                    num_chunks = int(last_caption_timestamp // max_sec_per_segment) + 1
                    time_chunks = [i * max_sec_per_segment for i in range(num_chunks + 1)]

                    time_chunk_idx = 0
                    caption = ""
                    for el in infos["subtitles"]:
                        # Emit the accumulated caption once a word falls past the
                        # current chunk boundary, or when the last subtitle is reached.
                        if (
                            el["time"] > time_chunks[time_chunk_idx + 1]
                            or el["time"] == last_caption_timestamp
                        ):
                            timestamp_start = float(time_chunks[time_chunk_idx])
                            timestamp_stop = float(time_chunks[time_chunk_idx + 1])
                            time_chunk_idx += 1
                            metadata_dict = {
                                "asr_info": infos["denoised"],
                                "info": infos["info"],
                                "subtitles": infos["subtitles"],
                                "title": infos["info"]["title"],
                            }
                            yield idx, {
                                "video_id": video_id,
                                "video_url": url,
                                "caption": caption,
                                "timestamp_start": timestamp_start,
                                # The final segment ends at the last caption time
                                # rather than at the chunk boundary.
                                "timestamp_stop": timestamp_stop
                                if el["time"] != last_caption_timestamp
                                else float(last_caption_timestamp),
                                "meta": json.dumps(metadata_dict, default=json_serializer, indent=2),
                            }
                            idx += 1
                            caption = ""
                        else:
                            caption += el["word"] + " "
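
# A minimal usage sketch, not part of the original loader. It assumes this
# script is saved locally and that the MERLOT shard URLs above are still
# reachable; streaming is used because the full download is very large.
# Depending on your `datasets` version, loading a local script may also
# require passing trust_remote_code=True.
if __name__ == "__main__":
    import itertools

    ds = datasets.load_dataset(__file__, split="train", streaming=True)
    # Print the first few ~15-second caption segments without downloading
    # every shard to disk.
    for example in itertools.islice(ds, 3):
        print(example["video_id"], example["timestamp_start"], example["timestamp_stop"])
        print(example["caption"][:80])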