"""Hugging Face datasets loading script for the Knesset meetings corpus."""

import glob
import os

import datasets

VERSION = datasets.Version("0.0.1")

# Subset names must match the archive file names under data/ (e.g. data/kneset16.tar.gz).
SUBSET_NAMES = [
    "kneset16",
    "kneset17",
    "knesset_tagged",
]


class KnessetMeetingsCorpus(datasets.GeneratorBasedBuilder):
    """Knesset meetings corpus."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name, version=VERSION, description=f"{name} meetings corpus"
        )
        for name in SUBSET_NAMES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=(
                "A corpus of transcriptions of Knesset (Israeli parliament) "
                "meetings between January 2004 and November 2005"
            ),
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            homepage="https://zenodo.org/record/2707356",
            citation="""TODO""",
        )

    def _split_generators(self, dl_manager):
        # Each subset ships as a single tar.gz archive under data/;
        # download_and_extract returns the local extraction directory.
        root_path = dl_manager.download_and_extract(f"data/{self.config.name}.tar.gz")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "root_path": root_path,
                    "split": "train",
                    "subset_name": self.config.name,
                },
            )
        ]

    def _generate_examples(self, root_path, split, subset_name):
        data_folder = os.path.join(root_path, subset_name)
        if subset_name == "knesset_tagged":
            # The tagged subset contains XML files only; no plain-text
            # transcript is available, so the "text" field is left empty.
            for xml_file in glob.glob(f"{data_folder}/16/*.xml"):
                uid = os.path.splitext(os.path.basename(xml_file))[0]
                yield uid, {
                    "path": xml_file,
                    "text": None,
                }
        else:
            # The untagged subsets pair a plain-text transcript (txt/) with
            # the original Word document (docx/) sharing the same file stem.
            for txt_file in glob.glob(f"{data_folder}/txt/*.txt"):
                uid = os.path.splitext(os.path.basename(txt_file))[0]
                docx_file = os.path.join(data_folder, "docx", f"{uid}.docx")
                # Transcripts are assumed to be UTF-8 encoded.
                with open(txt_file, "r", encoding="utf-8") as f:
                    text = f.read()
                yield uid, {
                    "path": docx_file,
                    "text": text,
                }
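

# --- Usage sketch (not part of the loader) ----------------------------------
# A minimal example of loading one subset. It assumes this script is saved as
# "knesset_meetings.py" (hypothetical file name) next to the data/ archives,
# and that the installed `datasets` version still supports local loading
# scripts:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("knesset_meetings.py", name="kneset16", split="train")
#     print(ds[0]["path"])
#     print(ds[0]["text"][:200])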