Datasets: leonardo-avila/fiqa_pt
Languages: Portuguese
Commit: 175a1cd (parent: db97385), committed by leonardo-avila
Update fiqa_pt.py
fiqa_pt.py: +63 -29 (CHANGED)
Hunks: @@ -1,43 +1,68 @@ and @@ -46,11 +71,20 @@ class BEIR_PT(datasets.GeneratorBasedBuilder):

Before (removed and unchanged lines as the diff shows them; lines the diff omits are marked "..."):

import json
import csv
import os
import datasets

...

_DESCRIPTION = "FIQA translated dataset"
_SPLITS = ["corpus", "topics"]

...

class BEIR_PT(datasets.GeneratorBasedBuilder):
    """BEIR-PT BenchmarkDataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            ...
        ) for name in _SPLITS
    ]

    ...

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "_id": datasets.Value("string"),
                "text": datasets.Value("string"),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        ...
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=self.config.name,
                ...
            ),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            ...
After (the full new file; lines 69-70 fall between the two hunks and are marked "..."):

import csv
import json
import os

import datasets

_DESCRIPTION = """\
FIQA translated dataset to portuguese
"""

_URLS = {
    "corpus": "https://huggingface.co/datasets/leonardo-avila/fiqa_pt/blob/main/corpus_pt.tsv",
    "topics": "https://huggingface.co/datasets/leonardo-avila/fiqa_pt/blob/main/topics_pt.tsv",
    "qrel": "https://huggingface.co/qrel.tsv",
}


# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class BeirPT(datasets.GeneratorBasedBuilder):
    """BEIR BenchmarkDataset."""

    VERSION = datasets.Version("1.1.0")

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="corpus", version=VERSION, description="Load corpus"),
        datasets.BuilderConfig(name="topics", version=VERSION, description="Load topics"),
        datasets.BuilderConfig(name="qrel", version=VERSION, description="Load qrel"),
    ]

    DEFAULT_CONFIG_NAME = "corpus"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    def _info(self):
        if self.config.name in ["corpus", "topics"]:
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
            features = datasets.Features(
                {
                    "query_id": datasets.Value("string"),
                    "doc_id": datasets.Value("string"),
                    "rel": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=self.config.name,
                ...
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        with open(filepath, encoding="utf-8") as f:
            if self.config.name in ["corpus", "topics"]:
                for line in f:
                    fields = line.strip().split("\t")
                    idx = fields[0]
                    text = fields[1]
                    yield idx, text
            else:
                for line in f:
                    if "query-id" not in line:
                        fields = line.strip().split("\t")
                        query_id = fields[0]
                        doc_id = fields[1]
                        rel = int(fields[2])
                        yield query_id, doc_id, rel
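A note on the download URLs: on the Hugging Face Hub, blob/main URLs return the HTML file viewer rather than the raw file, so dl_manager.download_and_extract will fetch an HTML page; raw files are served under resolve/main. The "qrel" entry ("https://huggingface.co/qrel.tsv") also does not point into the dataset repo. A sketch of what _URLS would more plausibly need, where the qrel filename (qrel_pt.tsv) is an assumption, not a file confirmed by this commit:

# Sketch, not the committed code: resolve/main serves raw bytes.
_URLS = {
    "corpus": "https://huggingface.co/datasets/leonardo-avila/fiqa_pt/resolve/main/corpus_pt.tsv",
    "topics": "https://huggingface.co/datasets/leonardo-avila/fiqa_pt/resolve/main/topics_pt.tsv",
    # Hypothetical filename: the committed "https://huggingface.co/qrel.tsv"
    # is not a repo file URL.
    "qrel": "https://huggingface.co/datasets/leonardo-avila/fiqa_pt/resolve/main/qrel_pt.tsv",
}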
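As committed, _generate_examples will not run under datasets: a GeneratorBasedBuilder must yield (key, example) pairs where the example is a dict matching the features declared in _info, but the corpus/topics branch yields the bare pair (idx, text) with a string where the dict should be, and the qrel branch yields a 3-tuple that cannot be unpacked into (key, example) at all. The qrel branch also yields rel as an int although _info declares it as Value("string"). A minimal corrected sketch against the declared features:

def _generate_examples(self, filepath, split):
    with open(filepath, encoding="utf-8") as f:
        if self.config.name in ["corpus", "topics"]:
            for line in f:
                fields = line.strip().split("\t")
                # Example dict keys must match the Features in _info.
                yield fields[0], {"id": fields[0], "text": fields[1]}
        else:
            for i, line in enumerate(f):
                if "query-id" in line:  # skip the TSV header row
                    continue
                fields = line.strip().split("\t")
                # Use the row index as the key: one query is judged against
                # many documents, so query_id alone would duplicate keys.
                yield i, {
                    "query_id": fields[0],
                    "doc_id": fields[1],
                    "rel": str(fields[2]),  # declared as Value("string") in _info
                }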
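Assuming the fixes above, each configuration exposes a single split named after itself, since the SplitGenerator is created with name=self.config.name. A usage sketch, with the repo id taken from the URLs in the script:

from datasets import load_dataset

# "corpus" is DEFAULT_CONFIG_NAME. Depending on your datasets version,
# loading a script-based dataset may also require trust_remote_code=True.
corpus = load_dataset("leonardo-avila/fiqa_pt", "corpus", split="corpus")
topics = load_dataset("leonardo-avila/fiqa_pt", "topics", split="topics")

print(corpus[0]["id"], corpus[0]["text"][:80])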