import json
import os

import datasets
from datasets import DatasetInfo, DownloadManager


class DuReaderConfig(datasets.BuilderConfig):
    """BuilderConfig for the DuReader family of datasets.

    Args:
        name: Sub-dataset name ("robust" or "checklist").
        data_url: URL of the ``.tar.gz`` archive holding the split files.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (e.g. ``description``).
    """

    def __init__(self, name, data_url, **kwargs):
        # Forward **kwargs to the base class — the previous version accepted
        # them but silently discarded them.
        super().__init__(name=name, version=datasets.Version("1.0.0", ""), **kwargs)
        self.data_url = data_url


class DuReader(datasets.GeneratorBasedBuilder):
    """DuReader Chinese machine-reading-comprehension datasets.

    Supports the "robust" and "checklist" sub-datasets; each is downloaded
    as a tarball from the Baidu CDN and parsed from SQuAD-style JSON.
    """

    BUILDER_CONFIGS = [
        DuReaderConfig(
            name="robust",
            data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_robust-data.tar.gz",
        ),
        DuReaderConfig(
            name="checklist",
            data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_checklist-data.tar.gz",
        ),
        # NOTE: a "yesno" config
        # (https://dataset-bj.cdn.bcebos.com/qianyan/dureader_yesno-data.tar.gz)
        # existed but is not wired up to any generator yet.
    ]

    def _info(self) -> DatasetInfo:
        """Return the feature schema for the active config (None if unknown)."""
        # Both configs share the SQuAD-style answers structure.
        answers_feature = datasets.Sequence(
            {
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            }
        )
        if self.config.name == "robust":
            features = {
                "id": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": answers_feature,
            }
        elif self.config.name == "checklist":
            features = {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                # NOTE(review): stored as a string feature even though the raw
                # JSON field is presumably a boolean — kept as-is for
                # compatibility with existing consumers.
                "is_impossible": datasets.Value("string"),
                "answers": answers_feature,
                "type": datasets.Value("string"),
            }
        else:
            # Unknown config: preserve the original behavior of returning None.
            return None
        return datasets.DatasetInfo(
            description="",
            citation="",
            homepage="",
            features=datasets.Features(features),
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download/extract the archive and declare train/dev/test splits."""

        def _build(train_files, valid_files, test_files):
            # One SplitGenerator per split; `split` is passed through to
            # _generate_examples so it can pick the right parser.
            train_split = datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": train_files, "split": "train"},
            )
            valid_split = datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": valid_files, "split": "dev"},
            )
            test_split = datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": test_files, "split": "test"},
            )
            return [train_split, valid_split, test_split]

        if self.config.name == "robust":
            dl_dir = dl_manager.download_and_extract(self.config.data_url)
            return _build(
                train_files=os.path.join(dl_dir, "dureader_robust-data", "train.json"),
                valid_files=os.path.join(dl_dir, "dureader_robust-data", "dev.json"),
                test_files=os.path.join(dl_dir, "dureader_robust-data", "test.json"),
            )
        if self.config.name == "checklist":
            dl_dir = dl_manager.download_and_extract(self.config.data_url)
            return _build(
                train_files=os.path.join(dl_dir, "dureader_checklist-data", "train.json"),
                valid_files=os.path.join(dl_dir, "dureader_checklist-data", "dev.json"),
                test_files=os.path.join(dl_dir, "dureader_checklist-data", "test.json"),
            )
        return []

    def _generate_examples(self, data_file, split):
        """Dispatch to the per-config parser; test splits have no answers."""
        if self.config.name == "robust":
            if split == "train" or split == "dev":
                print("Processing split: ", split)
                return self._generate_robust_examples(data_file)
            print("Processing split: ", split)
            return self._generate_robust_test_examples(data_file)
        if self.config.name == "checklist":
            if split == "train" or split == "dev":
                print("Processing split: ", split)
                return self._generate_checklist_examples(data_file)
            print("Processing split: ", split)
            return self._generate_checklist_test_examples(data_file)

    def _generate_robust_examples(self, data_file):
        """Yield (id, example) pairs for robust train/dev files."""
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
        for d in data:
            for p in d["paragraphs"]:
                context = p["context"]
                for qa in p["qas"]:
                    starts = [x["answer_start"] for x in qa["answers"]]
                    answers = [x["text"] for x in qa["answers"]]
                    example = {
                        "id": qa["id"],
                        "context": context,
                        "question": qa["question"],
                        "answers": {
                            "text": answers,
                            "answer_start": starts,
                        },
                    }
                    yield example["id"], example

    def _generate_robust_test_examples(self, data_file):
        """Yield (id, example) pairs for the robust test file (no answers)."""
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
        for d in data:
            for p in d["paragraphs"]:
                context = p["context"]
                for qa in p["qas"]:
                    qid = qa["id"]
                    example = {
                        "id": qid,
                        "context": context,
                        "question": qa["question"],
                        # Test split: answers intentionally empty.
                        "answers": {
                            "text": [],
                            "answer_start": [],
                        },
                    }
                    yield example["id"], example

    def _generate_checklist_examples(self, data_file):
        """Yield (id, example) pairs for checklist train/dev files."""
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
        exist_ids = set()
        for d in data:
            for p in d["paragraphs"]:
                title = p["title"].strip()
                context = p["context"].strip()
                for qa in p["qas"]:
                    qid = qa["id"]
                    # Skip duplicate keys — some raw files repeat QA ids.
                    if qid in exist_ids:
                        continue
                    exist_ids.add(qid)
                    starts = [x["answer_start"] for x in qa["answers"]]
                    answers = [x["text"].strip() for x in qa["answers"]]
                    example = {
                        "id": qid,
                        "title": title,
                        "context": context,
                        "question": qa["question"].strip(),
                        "is_impossible": qa["is_impossible"],
                        "answers": {
                            "text": answers,
                            "answer_start": starts,
                        },
                        "type": qa["type"].strip(),
                    }
                    yield example["id"], example

    def _generate_checklist_test_examples(self, data_file):
        """Yield (id, example) pairs for the checklist test file (no answers)."""
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
        exist_ids = set()
        for d in data:
            for p in d["paragraphs"]:
                title = p["title"]
                context = p["context"]
                for qa in p["qas"]:
                    qid = qa["id"]
                    # Skip duplicate keys, mirroring the train/dev parser.
                    if qid in exist_ids:
                        continue
                    exist_ids.add(qid)
                    example = {
                        "id": qid,
                        "title": title,
                        "context": context,
                        "question": qa["question"],
                        # Test split: labels intentionally absent.
                        "is_impossible": None,
                        "answers": {
                            "text": [],
                            "answer_start": [],
                        },
                        "type": None,
                    }
                    yield example["id"], example