# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Wikipedia NQ dataset."""
import json
import random
random.seed(42)
import datasets
RANGE = (0, 1000)
_CITATION = """
@inproceedings{xorqa,
title = {{XOR} {QA}: Cross-lingual Open-Retrieval Question Answering},
author = {Akari Asai and Jungo Kasai and Jonathan H. Clark and Kenton Lee and Eunsol Choi and Hannaneh Hajishirzi},
booktitle={NAACL-HLT},
year = {2021}
}
"""
_DESCRIPTION = "dataset load script for XOR-TyDi"
base = "/home/czhang/src/task-sparse/tevatron/hgf_datasets/xor-tydi"  # unused local path
_DATASET_URLS = {
'targetQ': {
        'train': 'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/train/targetL_dpr_train_data.json',
        'dev': 'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/dev/xor_dev_retrieve_eng_span_v1_1.jsonl',
        'test': 'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/test/xor_test_retrieve_eng_span_q_only_v1_1.jsonl',
},
'engQ': {
        'train': 'https://huggingface.co/datasets/crystina-z/xor-tydi/resolve/main/train/EN_dpr_train_data.json',
}
}
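
# The raw files above come in two shapes (this summary is inferred from the parsing code
# in _generate_examples below, not from an official spec; field values are illustrative):
#   * train (*.json): a single JSON array of DPR-style records such as
#       {"question": "...", "answers": ["..."],
#        "positive_ctxs": [{"title": "...", "text": "..."}],
#        "hard_negative_ctxs": [{"title": "...", "text": "..."}]}
#     where "text" may also be a single-element list of strings.
#   * dev/test (*.jsonl): one query per line, e.g.
#       {"id": "...", "question": "...", "lang": "ar", "answers": ["..."]}
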
class XORTyDi(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.0.1")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
version=VERSION,
name="targetQ",
description="XOR-TyDI train/dev/test datasets of English Span Task"),
datasets.BuilderConfig(
version=VERSION,
name="engQ",
description="XOR-TyDI train/dev/test datasets of Full Task"),
]
def _info(self):
features = datasets.Features({
'query_id': datasets.Value('string'),
'query': datasets.Value('string'),
'answers': [datasets.Value('string')],
'lang': datasets.Value('string'),
'positive_passages': [
{'docid': datasets.Value('string'), 'text': datasets.Value('string'),
'title': datasets.Value('string')}
],
'negative_passages': [
{'docid': datasets.Value('string'), 'text': datasets.Value('string'),
'title': datasets.Value('string')}
],
})
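        # A generated record has this shape (hypothetical values): train entries fill the
        # passage lists and leave "lang" empty, while dev/test entries fill "lang" and
        # leave both passage lists empty.
        #   {"query_id": "12", "query": "...", "answers": ["..."], "lang": "",
        #    "positive_passages": [{"docid": "0", "title": "...", "text": "..."}],
        #    "negative_passages": [{"docid": "3", "title": "...", "text": "..."}]}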
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
            features=features,  # both configs share the same feature schema defined above
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="",
# License for the dataset if available
license="",
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
group = self.config.name
if self.config.data_files:
downloaded_files = self.config.data_files
else:
downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[group])
splits = [
datasets.SplitGenerator(
name=split,
gen_kwargs={
"files": [downloaded_files[split]] if isinstance(downloaded_files[split], str) else downloaded_files[split],
},
) for split in downloaded_files
]
return splits
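
    # The train files are one JSON array of DPR-style records, while the dev/test files are
    # JSONL with one query per line; _generate_examples first tries the array path
    # (json.load over the whole file) and falls back to line-by-line parsing.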
def _generate_examples(self, files):
assert len(files) == 1
filepath = files[0]
def process_doc_text(doc):
if isinstance(doc["text"], list):
assert len(doc["text"]) == 1
return doc['text'][0].strip()
else:
assert isinstance(doc["text"], str)
return doc['text'].strip()
# prepare doc
def get_doc2docid(all_data):
doc2docid = {}
# with open(filepath, encoding="utf-8") as f:
# all_data = json.load(f)
for i, data in enumerate(all_data):
positive_ctxs = data["positive_ctxs"]
hard_negative_ctxs = data["hard_negative_ctxs"]
ctxs = positive_ctxs + hard_negative_ctxs
for doc in ctxs:
text = process_doc_text(doc)
if text not in doc2docid:
doc2docid[text] = len(doc2docid)
return doc2docid
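
        # docids are assigned in first-seen order over the whole file, so identical passage
        # texts shared by different queries get the same id; this apparently replaces the
        # per-query random ids still visible in the commented-out line below.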
def process_train_entry(data, _id, doc2docid):
positive_ctxs = data["positive_ctxs"]
hard_negative_ctxs = data["hard_negative_ctxs"]
# each ctx: {'title':... , 'text': ....}
def process_ctx(ctxs, tag):
processed = []
for i, doc in enumerate(ctxs):
text = process_doc_text(doc)
processed.append({
"title": doc["title"],
"text": text,
# 'docid': f'{tag}-{i}-{random.randint(*RANGE)}'
'docid': doc2docid[text]
})
return processed
return _id, {
"query_id": _id,
"query": data["question"],
"answers": data.get("answers", []),
"lang": "",
"positive_passages": process_ctx(positive_ctxs, "pos"),
"negative_passages": process_ctx(hard_negative_ctxs, "neg"),
}
def process_dev_test_entry(data):
return data["id"], {
"query_id": data["id"],
"query": data["question"],
"answers": data.get("answers", []),
"lang": data["lang"],
"positive_passages": [],
"negative_passages": [],
}
try:
with open(filepath, encoding="utf-8") as f:
all_data = json.load(f)
doc2docid = get_doc2docid(all_data)
for i, data in enumerate(all_data):
yield process_train_entry(data, i, doc2docid)
            # note: branching on filepath.endswith(".jsonl") did not work here, hence
            # the broad try/except fallback to line-by-line JSONL parsing
        except Exception:
with open(filepath, encoding="utf-8") as f:
for line in f:
data = json.loads(line)
if "id" in data and "query_id" not in data:
yield process_dev_test_entry(data)
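

# A minimal smoke test (a sketch, not part of the original loader): pointing
# datasets.load_dataset at this script exercises the "engQ" config end to end. It assumes
# a `datasets` version that still supports script-based loading (later 2.x releases also
# need trust_remote_code=True); the call downloads the train file from the
# crystina-z/xor-tydi repo referenced above.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, "engQ", split="train")
    print(ds[0]["query_id"], ds[0]["query"])
    print(len(ds[0]["positive_passages"]), "positive /", len(ds[0]["negative_passages"]), "negative passages")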