Languages: English
Multilinguality: multilingual
Size Categories: 10M<n<100M
Language Creators: crowdsourced
Annotations Creators: no-annotation
Source Datasets: original
License: CC-BY-NC 4.0

import os

import pyarrow as pa
import pyarrow.parquet as pq

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """
@inproceedings{karpukhin-etal-2020-dense,
    title = "Dense Passage Retrieval for Open-Domain Question Answering",
    author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov, Sergey and Chen, Danqi and Yih, Wen-tau",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.550",
    doi = "10.18653/v1/2020.emnlp-main.550",
    pages = "6769--6781",
}
"""

_DESCRIPTION = """
This is the Wikipedia split used to evaluate the Dense Passage Retrieval (DPR) model.
It contains 21M passages from Wikipedia along with their DPR embeddings.
The Wikipedia articles were split into multiple, disjoint text blocks of 100 words, which serve as passages.
"""

_LICENSE = """DPR is CC-BY-NC 4.0 licensed."""

_DATA_TO_NUM_SHARDS = {
    "nq": 157,
    "multiset": 157,
    "no_embeddings": 28,
    "dummy.nq": 1,
    "dummy.multiset": 1,
    "dummy.no_embeddings": 1,
}
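
# The shard counts above map onto the parquet layout read in `_split_generators` below:
# one file per shard at data/{wiki_split}/{name}/train-{i:05d}-of-{num_shards:05d}.parquet,
# e.g. the first "nq" shard is expected at data/psgs_w100/nq/train-00000-of-00157.parquet.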


class WikiDprConfig(datasets.BuilderConfig):
    """BuilderConfig for WikiDpr."""

    def __init__(
        self,
        with_embeddings=True,
        with_index=True,
        wiki_split="psgs_w100",
        embeddings_name="nq",
        index_name="compressed",
        index_train_size=262144,
        dummy=False,
        **kwargs,
    ):
        """BuilderConfig for WikiDpr.

        Args:
            with_embeddings (`bool`, defaults to `True`): Load the 768-dimensional embeddings from DPR.
            with_index (`bool`, defaults to `True`): Load the faiss index trained on the embeddings.
            wiki_split (`str`, defaults to `"psgs_w100"`): Name of the method used to split the Wikipedia articles into passages.
            embeddings_name (`str`, defaults to `"nq"`): "nq" or "multiset", depending on which dataset DPR was trained on.
            index_name (`str`, defaults to `"compressed"`): "compressed" or "exact", the configuration of the faiss index to use.
            index_train_size (`int`, defaults to `262144`): Size of the subset used to train the index, if it is trainable.
            dummy (`bool`, defaults to `False`): Use only 10,000 examples, for testing purposes.
            **kwargs: Keyword arguments forwarded to super.
        """
        self.with_embeddings = with_embeddings
        self.with_index = with_index and index_name != "no_index"
        self.wiki_split = wiki_split
        self.embeddings_name = embeddings_name
        self.index_name = index_name if with_index else "no_index"
        self.index_train_size = index_train_size
        self.dummy = dummy
        name = [self.wiki_split, self.embeddings_name, self.index_name]
        if not self.with_embeddings:
            name.append("no_embeddings")
        if self.dummy:
            name = ["dummy"] + name
            assert (
                self.index_name != "compressed" or not self.with_index
            ), "Please use `index_name='exact'` for dummy wiki_dpr"
        assert wiki_split == "psgs_w100"
        assert embeddings_name in ("nq", "multiset")
        assert index_name in ("compressed", "exact", "no_index")
        kwargs["name"] = ".".join(name)
        super(WikiDprConfig, self).__init__(**kwargs)
        prefix = f"{wiki_split}.{embeddings_name}."
        if self.index_name == "exact":
            self.index_file = prefix + "HNSW128_SQ8-IP-{split}.faiss"
        else:
            self.index_file = prefix + "IVF4096_HNSW128_PQ128-IP-{split}.faiss"
        if self.dummy:
            self.index_file = "dummy." + self.index_file
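

# Illustrative usage note: configuration names are built above by joining wiki_split,
# embeddings_name and index_name (plus optional "no_embeddings" and "dummy" markers),
# e.g. "psgs_w100.nq.exact", "psgs_w100.multiset.compressed",
# "psgs_w100.nq.no_index.no_embeddings" or "dummy.psgs_w100.nq.exact".
# Assuming the standard `datasets` loading API, a configuration would be loaded roughly as:
#   from datasets import load_dataset
#   ds = load_dataset("wiki_dpr", "psgs_w100.nq.exact", split="train")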


class WikiDpr(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = WikiDprConfig
    BUILDER_CONFIGS = [
        WikiDprConfig(
            embeddings_name=embeddings_name,
            with_embeddings=with_embeddings,
            index_name=index_name,
            version=datasets.Version("0.0.0"),
        )
        for with_embeddings in (True, False)
        for embeddings_name in ("nq", "multiset")
        for index_name in ("exact", "compressed", "no_index")
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "embeddings": datasets.Sequence(datasets.Value("float32")),
                }
            )
            if self.config.with_embeddings
            else datasets.Features(
                {"id": datasets.Value("string"), "text": datasets.Value("string"), "title": datasets.Value("string")}
            ),
            supervised_keys=None,
            homepage="https://github.com/facebookresearch/DPR",
            citation=_CITATION,
            license=_LICENSE,
        )
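
    # Illustrative example of the schema defined above: with embeddings enabled, one record
    # roughly looks like
    #   {"id": "...", "title": "...", "text": "...", "embeddings": [0.01, -0.23, ...]}  # 768 floats
    # With `with_embeddings=False`, the "embeddings" column is absent.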

    def _split_generators(self, dl_manager):
        data_dir = self.config.embeddings_name if self.config.with_embeddings else "no_embeddings"
        if self.config.dummy:
            data_dir = "dummy." + data_dir
        num_shards = _DATA_TO_NUM_SHARDS[data_dir]
        data_dir = os.path.join("data", self.config.wiki_split, data_dir)
        files = [os.path.join(data_dir, f"train-{i:05d}-of-{num_shards:05d}.parquet") for i in range(num_shards)]
        downloaded_files = dl_manager.download_and_extract(files)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": downloaded_files}),
        ]

    def _generate_tables(self, files):
        for file in files:
            with open(file, "rb") as f:
                f = pq.ParquetFile(f)
                for batch_idx, batch in enumerate(f.iter_batches(batch_size=1000)):
                    # Use a key that is unique across files, not just within one file.
                    yield f"{file}_{batch_idx}", pa.Table.from_batches([batch])

    def _post_processing_resources(self, split):
        if self.config.with_index:
            return {"embeddings_index": self.config.index_file.format(split=split)}
        else:
            return {}

    def _download_post_processing_resources(self, split, resource_name, dl_manager):
        if resource_name == "embeddings_index":
            try:
                downloaded_resources = dl_manager.download_and_extract(
                    {"embeddings_index": os.path.join("index", self.config.index_file.format(split=split))}
                )
                return downloaded_resources["embeddings_index"]
            except (FileNotFoundError, ConnectionError):  # index doesn't exist
                pass

    def _post_process(self, dataset, resources_paths):
        if self.config.with_index:
            index_file = resources_paths["embeddings_index"]
            if os.path.exists(index_file):
                dataset.load_faiss_index("embeddings", index_file)
            else:
                if "embeddings" not in dataset.column_names:
                    raise ValueError("Couldn't build the index because there are no embeddings.")
                import faiss

                d = 768
                train_size = self.config.index_train_size
                logger.info("Building wiki_dpr faiss index")
                if self.config.index_name == "exact":
                    # "exact" config: HNSW graph over 8-bit scalar-quantized vectors, inner-product metric.
                    index = faiss.IndexHNSWSQ(d, faiss.ScalarQuantizer.QT_8bit, 128, faiss.METRIC_INNER_PRODUCT)
                    index.hnsw.efConstruction = 200
                    index.hnsw.efSearch = 128
                    dataset.add_faiss_index("embeddings", custom_index=index, train_size=train_size)
                else:
                    # "compressed" config: IVF-PQ index whose coarse quantizer is an HNSW flat index.
                    quantizer = faiss.IndexHNSWFlat(d, 128, faiss.METRIC_INNER_PRODUCT)
                    quantizer.hnsw.efConstruction = 200
                    quantizer.hnsw.efSearch = 128
                    ivf_index = faiss.IndexIVFPQ(quantizer, d, 4096, 128, 8, faiss.METRIC_INNER_PRODUCT)
                    ivf_index.nprobe = 64
                    # Transfer ownership of the quantizer to the IVF index so it isn't freed on the Python side.
                    ivf_index.own_fields = True
                    quantizer.this.disown()
                    dataset.add_faiss_index(
                        "embeddings",
                        train_size=train_size,
                        custom_index=ivf_index,
                    )
                logger.info("Saving wiki_dpr faiss index")
                dataset.save_faiss_index("embeddings", index_file)
        return dataset
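

# Illustrative usage note: once a configuration with an index is loaded, nearest-neighbour
# search over the "embeddings" column would look roughly like this, assuming the standard
# `datasets` faiss integration and a 768-dim query vector `question_embedding` produced by
# a DPR question encoder:
#   scores, passages = ds.get_nearest_examples("embeddings", question_embedding, k=5)
#   print(passages["title"])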