---
annotations_creators:
- expert-generated
multilinguality:
- multilingual
size_categories:
- 10K<n<100K
source_datasets:
- miracl/miracl
task_categories:
- text-classification
---
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""NoMIRACL: A dataset to evaluation LLM robustness across 18 languages.""" | |
import os | |
import json | |
import csv | |
import datasets | |
from collections import defaultdict | |
_CITATION = """\
@article{thakur2023nomiracl,
  title={NoMIRACL: Knowing When You Don't Know for Robust Multilingual Retrieval-Augmented Generation},
  author={Nandan Thakur and Luiz Bonifacio and Xinyu Zhang and Odunayo Ogundepo and Ehsan Kamalloo and David Alfonso-Hermelo and Xiaoguang Li and Qun Liu and Boxing Chen and Mehdi Rezagholizadeh and Jimmy Lin},
  journal={ArXiv},
  year={2023},
  volume={abs/2312.11361}
}
"""

_DESCRIPTION = """\
Data loader for NoMIRACL, a multilingual dataset for evaluating LLM robustness
in retrieval-augmented generation across 18 languages.
"""

_URL = "https://github.com/project-miracl/nomiracl"
_DL_URL_FORMAT = "data/{name}"


def load_topics(filepath: str):
    """Loads queries from a TSV file into a {query_id: query_text} dictionary."""
    queries = {}
    with open(filepath, "r", encoding="utf-8") as f:
        reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
        for row in reader:
            queries[row[0]] = row[1]
    return queries
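
# Illustrative topics line (tab-separated, no header; the id and text below
# are hypothetical, shown only to document the expected layout):
#
#     "7#0\tWhen was the Berlin Wall built?"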


def load_corpus(filepath: str):
    """Loads the JSONL corpus as a {docid: {"text": ..., "title": ...}} dictionary."""
    corpus = {}
    with open(filepath, encoding="utf-8") as f:
        for line in f:
            doc = json.loads(line)
            corpus[doc.get("docid")] = {
                "text": doc.get("text", "").strip(),
                "title": doc.get("title", "").strip(),
            }
    return corpus
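
# Illustrative corpus line (one JSON document per line; the values are
# hypothetical):
#
#     {"docid": "12#4", "title": "Berlin Wall", "text": "The Berlin Wall was ..."}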


def load_qrels(filepath: str):
    """Loads TREC-style qrels into a {query_id: {docid: relevance}} dictionary."""
    if filepath is None:
        return None
    qrels = defaultdict(dict)
    with open(filepath, encoding="utf-8") as f:
        for line in f:
            # Each line holds: query id, unused run tag, doc id, relevance label.
            qid, _, docid, rel = line.strip().split("\t")
            qrels[qid][docid] = int(rel)
    return qrels
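
# Illustrative qrels line (TREC-style TSV; the ids are hypothetical):
#
#     "7#0\tQ0\t12#4\t1"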


class NoMIRACLConfig(datasets.BuilderConfig):
    """BuilderConfig for NoMIRACL."""

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, name of dataset config (=language)
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0", ""), name=name.lower(), **kwargs)
        # Relative path to the full data inside the repo (for example `data/german`).
        self.data_root_url = _DL_URL_FORMAT.format(name=name)
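
# For example, NoMIRACLConfig(name="german") sets data_root_url to
# "data/german", so its corpus file resolves to "data/german/corpus.jsonl.gz".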


class NoMIRACL(datasets.GeneratorBasedBuilder):
    """Multilingual NoMIRACL dataset."""

    BUILDER_CONFIGS = [
        NoMIRACLConfig(name="arabic", description="Arabic NoMIRACL dataset"),
        NoMIRACLConfig(name="chinese", description="Chinese NoMIRACL dataset"),
        NoMIRACLConfig(name="finnish", description="Finnish NoMIRACL dataset"),
        NoMIRACLConfig(name="german", description="German NoMIRACL dataset"),
        NoMIRACLConfig(name="indonesian", description="Indonesian NoMIRACL dataset"),
        NoMIRACLConfig(name="korean", description="Korean NoMIRACL dataset"),
        NoMIRACLConfig(name="russian", description="Russian NoMIRACL dataset"),
        NoMIRACLConfig(name="swahili", description="Swahili NoMIRACL dataset"),
        NoMIRACLConfig(name="thai", description="Thai NoMIRACL dataset"),
        NoMIRACLConfig(name="bengali", description="Bengali NoMIRACL dataset"),
        NoMIRACLConfig(name="english", description="English NoMIRACL dataset"),
        NoMIRACLConfig(name="french", description="French NoMIRACL dataset"),
        NoMIRACLConfig(name="hindi", description="Hindi NoMIRACL dataset"),
        NoMIRACLConfig(name="japanese", description="Japanese NoMIRACL dataset"),
        NoMIRACLConfig(name="persian", description="Persian NoMIRACL dataset"),
        NoMIRACLConfig(name="spanish", description="Spanish NoMIRACL dataset"),
        NoMIRACLConfig(name="telugu", description="Telugu NoMIRACL dataset"),
        NoMIRACLConfig(name="yoruba", description="Yoruba NoMIRACL dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "query_id": datasets.Value("string"),
                    "query": datasets.Value("string"),
                    "positive_passages": [
                        {
                            "docid": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "title": datasets.Value("string"),
                        }
                    ],
                    "negative_passages": [
                        {
                            "docid": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "title": datasets.Value("string"),
                        }
                    ],
                }
            ),
            # The original ("file", "text") pair referenced features that do not
            # exist in this schema, so no supervised keys are declared.
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        # Download the shared corpus plus per-split qrels and topics files.
        root = self.config.data_root_url
        downloaded_files = dl_manager.download_and_extract(
            {
                "corpus": root + "/corpus.jsonl.gz",
                "dev": {
                    "qrels": {
                        "relevant": root + "/qrels/dev.relevant.tsv",
                        "non_relevant": root + "/qrels/dev.non_relevant.tsv",
                    },
                    "topics": {
                        "relevant": root + "/topics/dev.relevant.tsv",
                        "non_relevant": root + "/topics/dev.non_relevant.tsv",
                    },
                },
                "test": {
                    "qrels": {
                        "relevant": root + "/qrels/test.relevant.tsv",
                        "non_relevant": root + "/qrels/test.non_relevant.tsv",
                    },
                    "topics": {
                        "relevant": root + "/topics/test.relevant.tsv",
                        "non_relevant": root + "/topics/test.non_relevant.tsv",
                    },
                },
            }
        )
        # Each split pairs the shared corpus with its own qrels and topics files.
        splits = [
            datasets.SplitGenerator(
                name=f"{split}.{subset}",
                gen_kwargs={
                    "corpus_path": downloaded_files["corpus"],
                    "qrels_path": downloaded_files[split]["qrels"][subset],
                    "topics_path": downloaded_files[split]["topics"][subset],
                },
            )
            for split in ("dev", "test")
            for subset in ("relevant", "non_relevant")
        ]
        return splits
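
    # The resulting split names are "dev.relevant", "dev.non_relevant",
    # "test.relevant", and "test.non_relevant".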

    def _generate_examples(self, corpus_path, qrels_path, topics_path):
        corpus = load_corpus(corpus_path)
        qrels = load_qrels(qrels_path)
        topics = load_topics(topics_path)
        for qid, query in topics.items():
            # Partition the judged documents into positives (rel == 1) and
            # negatives (rel == 0) for this query.
            if qrels is not None:
                pos_docids = [docid for docid, rel in qrels[qid].items() if rel == 1]
                neg_docids = [docid for docid, rel in qrels[qid].items() if rel == 0]
            else:
                pos_docids, neg_docids = [], []
            data = {
                "query_id": qid,
                "query": query,
                "positive_passages": [
                    {"docid": docid, **corpus[docid]} for docid in pos_docids if docid in corpus
                ],
                "negative_passages": [
                    {"docid": docid, **corpus[docid]} for docid in neg_docids if docid in corpus
                ],
            }
            yield qid, data
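
# Illustrative yielded example (the ids and text are hypothetical; the shape
# follows the features declared in _info):
#
#     ("7#0", {"query_id": "7#0",
#              "query": "When was the Berlin Wall built?",
#              "positive_passages": [{"docid": "12#4", "title": "Berlin Wall",
#                                     "text": "The Berlin Wall was ..."}],
#              "negative_passages": []})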