Datasets: Nandan Thakur committed
Commit 8cebd13 • 1 Parent(s): 6656605
added beir corpus loading file
beir-corpus.py +58 -0
beir-corpus.py
ADDED
@@ -0,0 +1,58 @@
+import json
+import csv
+import os
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+
+_DESCRIPTION = "BEIR Benchmark"
+_DATASETS = ["fiqa", "trec-covid"]
+
+URL = ""
+_URLs = {
+    dataset: {
+        "corpus": URL + f"{dataset}/corpus.jsonl",
+    } for dataset in _DATASETS}
+
+
+class BEIR(datasets.GeneratorBasedBuilder):
+    """BEIR BenchmarkDataset."""
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name=dataset,
+            description=f"This is the {dataset} dataset in BEIR Benchmark.",
+        ) for dataset in _DATASETS
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features({
+                "_id": datasets.Value("string"),
+                "title": datasets.Value("string"),
+                "text": datasets.Value("string")
+            }),
+            supervised_keys=None,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+
+        my_urls = _URLs[self.config.name]
+        data_dir = dl_manager.download_and_extract(my_urls)
+        return [
+            datasets.SplitGenerator(
+                name="corpus",
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={"corpus_path": data_dir["corpus"]}
+            ),
+        ]
+
+    def _generate_examples(self, corpus_path):
+        """Yields examples."""
+        with open(corpus_path, encoding="utf-8") as f:
+            texts = f.readlines()
+            for i, text in enumerate(texts):
+                yield i, json.loads(text)
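The script above exposes one "corpus" split per BEIR dataset config ("fiqa", "trec-covid"), where each example carries the features declared in _info(): "_id", "title", and "text", parsed line by line from the corresponding corpus.jsonl file. Below is a minimal usage sketch, not part of this commit: it assumes the loading script is available locally and that the per-dataset corpus.jsonl files are reachable at the paths the script builds (note that URL is an empty placeholder in this commit, so the "{dataset}/corpus.jsonl" paths resolve relative to it); the local path and example record are illustrative only.

# Minimal usage sketch (assumed setup, not confirmed by this commit):
# load the "corpus" split defined by beir-corpus.py with the `datasets` library.
from datasets import load_dataset

# "fiqa" is one of the two configs declared in _DATASETS.
corpus = load_dataset("./beir-corpus.py", "fiqa", split="corpus")

# Each record follows the features declared in _info(), e.g. (hypothetical values):
# {"_id": "doc1", "title": "Some title", "text": "Some passage text ..."}
print(corpus[0]["_id"], corpus[0]["title"])

Depending on the installed `datasets` version, loading a dataset from a Python script like this may additionally require passing trust_remote_code=True to load_dataset.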