import json
import os

import datasets
from beir.datasets.data_loader import GenericDataLoader

# ----------------------------------------
# This script downloads the BEIR-compatible GermanDPR dataset from "Huggingface Datasets" to your local machine.
# Please see the dataset's description/readme to learn more about how the dataset was created.
# If you want to use deepset/germandpr without any changes, use TYPE "original".
# If you want to reproduce PM-AI/bi-encoder_msmarco_bert-base_german, use TYPE "processed".
# ----------------------------------------


TYPE = "processed"  # or "original"
SPLIT = "train"  # or "train"
DOWNLOAD_DIR = "germandpr-beir-dataset"
DOWNLOAD_DIR = os.path.join(DOWNLOAD_DIR, f'{TYPE}/{SPLIT}')
DOWNLOAD_QREL_DIR = os.path.join(DOWNLOAD_DIR, f'qrels/')

os.makedirs(DOWNLOAD_QREL_DIR, exist_ok=True)
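# resulting on-disk layout (the structure BEIR's GenericDataLoader expects):
#   germandpr-beir-dataset/<TYPE>/<SPLIT>/corpus.jsonl
#   germandpr-beir-dataset/<TYPE>/<SPLIT>/queries.jsonl
#   germandpr-beir-dataset/<TYPE>/<SPLIT>/qrels/<SPLIT>.tsv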

# for BEIR compatibility we need queries, corpus and qrels together
# make sure to always load all three with the same TYPE (all "processed" or all "original")
for subset_name in ["queries", "corpus", "qrels"]:
    subset = datasets.load_dataset("PM-AI/germandpr-beir", f'{TYPE}-{subset_name}', split=SPLIT)
    if subset_name == "qrels":
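        # BEIR expects the qrels as a TSV file with the columns query-id, corpus-id and score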
        out_path = os.path.join(DOWNLOAD_QREL_DIR, f'{SPLIT}.tsv')
        subset.to_csv(out_path, sep="\t", index=False)
    else:
        if subset_name == "queries":
            _row_to_json = lambda row: json.dumps({"_id": row["_id"], "text": row["text"]}, ensure_ascii=False)
        else:
            _row_to_json = lambda row: json.dumps({"_id": row["_id"], "title": row["title"], "text": row["text"]}, ensure_ascii=False)

        with open(os.path.join(DOWNLOAD_DIR, f'{subset_name}.jsonl'), "w", encoding="utf-8") as out_file:
            for row in subset:
                out_file.write(_row_to_json(row) + "\n")


# GenericDataLoader is part of BEIR. If everything worked correctly, we can now load the dataset we just wrote.
corpus, queries, qrels = GenericDataLoader(data_folder=DOWNLOAD_DIR).load(SPLIT)
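# corpus maps doc_id -> {"title": ..., "text": ...}, queries maps query_id -> text,
# and qrels maps query_id -> {doc_id: relevance score}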
print(f'{SPLIT} corpus size: {len(corpus)}\n'
      f'{SPLIT} queries size: {len(queries)}\n'
      f'{SPLIT} qrels size: {len(qrels)}\n')

print("--------------------------------------------------------------------------------------------------------------\n"
      "Now you can use the downloaded files in BEIR framework\n"
      "Example: https://github.com/beir-cellar/beir/blob/v1.0.1/examples/retrieval/evaluation/dense/evaluate_sbert.py\n"
      "--------------------------------------------------------------------------------------------------------------")