system HF staff committed on
Commit
7a25764
1 Parent(s): c7430d4

Update files from the datasets library (from 1.17.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.17.0

Files changed (2)
  1. dataset_infos.json +1 -1
  2. qed.py +7 -1
dataset_infos.json CHANGED
@@ -1 +1 @@
-{"qed": {"description": "QED, is a linguistically informed, extensible framework for explanations in question answering. A QED explanation specifies the relationship between a question and answer according to formal semantic notions such as referential equality, sentencehood, and entailment. It is an expertannotated dataset of QED explanations built upon a subset of the Google Natural Questions dataset.\n", "citation": "@misc{lamm2020qed,\n title={QED: A Framework and Dataset for Explanations in Question Answering},\n author={Matthew Lamm and Jennimaria Palomaki and Chris Alberti and Daniel Andor and Eunsol Choi and Livio Baldini Soares and Michael Collins},\n year={2020},\n eprint={2009.06354},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/google-research-datasets/QED", "license": "", "features": {"example_id": {"dtype": "int64", "id": null, "_type": "Value"}, "title_text": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "paragraph_text": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_starts": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "original_nq_answers": [[{"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}]], "annotation": {"referential_equalities": [{"question_reference": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}, "sentence_reference": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "bridge": {"dtype": "bool_", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}}], "answer": [{"sentence_reference": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "bridge": {"dtype": "bool_", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}, "paragraph_reference": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}}], "explanation_type": {"dtype": "string", "id": null, "_type": "Value"}, "selected_sentence": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}}}, "post_processed": null, "supervised_keys": null, "builder_name": "qed", "config_name": "qed", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8560864, "num_examples": 7638, "dataset_name": "qed"}, "validation": {"name": "validation", "num_bytes": 1615171, "num_examples": 1355, "dataset_name": "qed"}}, "download_checksums": {"https://raw.githubusercontent.com/google-research-datasets/QED/master/qed-train.jsonlines": {"num_bytes": 11839736, "checksum": "b5cf65414defef8d42f6778dbd3cf0fa710adcdcb86fc693ab8edec8f0be7faf"}, "https://raw.githubusercontent.com/google-research-datasets/QED/master/qed-dev.jsonlines": {"num_bytes": 2244232, "checksum": "2ea322b71a333023380c3954083b81af2d5670c8ac47ddec58c843233895c429"}}, "download_size": 14083968, "post_processing_size": null, "dataset_size": 10176035, "size_in_bytes": 24260003}}
+{"qed": {"description": "QED, is a linguistically informed, extensible framework for explanations in question answering. A QED explanation specifies the relationship between a question and answer according to formal semantic notions such as referential equality, sentencehood, and entailment. It is an expertannotated dataset of QED explanations built upon a subset of the Google Natural Questions dataset.\n", "citation": "@misc{lamm2020qed,\n title={QED: A Framework and Dataset for Explanations in Question Answering},\n author={Matthew Lamm and Jennimaria Palomaki and Chris Alberti and Daniel Andor and Eunsol Choi and Livio Baldini Soares and Michael Collins},\n year={2020},\n eprint={2009.06354},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/google-research-datasets/QED", "license": "", "features": {"example_id": {"dtype": "int64", "id": null, "_type": "Value"}, "title_text": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "paragraph_text": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_starts": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "original_nq_answers": [{"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}], "annotation": {"referential_equalities": [{"question_reference": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}, "sentence_reference": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "bridge": {"dtype": "string", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}}], "answer": [{"sentence_reference": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "bridge": {"dtype": "string", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}, "paragraph_reference": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}}], "explanation_type": {"dtype": "string", "id": null, "_type": "Value"}, "selected_sentence": {"start": {"dtype": "int32", "id": null, "_type": "Value"}, "end": {"dtype": "int32", "id": null, "_type": "Value"}, "string": {"dtype": "string", "id": null, "_type": "Value"}}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "qed", "config_name": "qed", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8602094, "num_examples": 7638, "dataset_name": "qed"}, "validation": {"name": "validation", "num_bytes": 1584139, "num_examples": 1355, "dataset_name": "qed"}}, "download_checksums": {"https://raw.githubusercontent.com/google-research-datasets/QED/master/qed-train.jsonlines": {"num_bytes": 11839736, "checksum": "b5cf65414defef8d42f6778dbd3cf0fa710adcdcb86fc693ab8edec8f0be7faf"}, "https://raw.githubusercontent.com/google-research-datasets/QED/master/qed-dev.jsonlines": {"num_bytes": 2244232, "checksum": "2ea322b71a333023380c3954083b81af2d5670c8ac47ddec58c843233895c429"}}, "download_size": 14083968, "post_processing_size": null, "dataset_size": 10186233, "size_in_bytes": 24270201}}
qed.py CHANGED
@@ -64,7 +64,7 @@ class Qed(datasets.GeneratorBasedBuilder):
         reference_features = {
             "start": datasets.Value("int32"),
             "end": datasets.Value("int32"),
-            "bridge": datasets.Value("bool_"),
+            "bridge": datasets.Value("string"),
             "string": datasets.Value("string"),
         }
         return datasets.DatasetInfo(
@@ -132,6 +132,12 @@ class Qed(datasets.GeneratorBasedBuilder):
                     }
                 if "referential_equalities" not in example["annotation"]:
                     example["annotation"]["referential_equalities"] = []
+                else:
+                    for referential_equalities in example["annotation"]["referential_equalities"]:
+                        bridge = referential_equalities["sentence_reference"]["bridge"]
+                        referential_equalities["sentence_reference"]["bridge"] = (
+                            bridge if bridge is not False else None
+                        )
 
                 # remove the nested list
                 example["original_nq_answers"] = example["original_nq_answers"][0]