Modalities: Tabular, Text
Formats: parquet
Libraries: Datasets, Dask

# Please install clddp with `pip install -U clddp`
from clddp.retriever import Retriever, RetrieverConfig, Pooling, SimilarityFunction
from clddp.dm import Separator
from typing import Dict
from clddp.dm import Query, Passage
import torch
import pytrec_eval
import numpy as np
from datasets import load_dataset


# Define the retriever (DRAGON+ from https://arxiv.org/abs/2302.07452)
class DRAGONPlus(Retriever):
    def __init__(self) -> None:
        config = RetrieverConfig(
            query_model_name_or_path="facebook/dragon-plus-query-encoder",
            passage_model_name_or_path="facebook/dragon-plus-context-encoder",
            shared_encoder=False,
            sep=Separator.blank,
            pooling=Pooling.cls,
            similarity_function=SimilarityFunction.dot_product,
            query_max_length=512,
            passage_max_length=512,
        )
        super().__init__(config)


# Load data:
passages = load_dataset("kwang2049/dapr", "ConditionalQA-corpus", split="test")
queries = load_dataset("kwang2049/dapr", "ConditionalQA-queries", split="test")
qrels_rows = load_dataset("kwang2049/dapr", "ConditionalQA-qrels", split="test")
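# Build qrels as nested dicts: query_id -> {passage_id: relevance score}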
qrels: Dict[str, Dict[str, float]] = {}
for qrel_row in qrels_rows:
    qid = qrel_row["query_id"]
    pid = qrel_row["corpus_id"]
    rel = qrel_row["score"]
    qrels.setdefault(qid, {})
    qrels[qid][pid] = rel

# Encode queries and passages: (refer to https://github.com/kwang2049/clddp/blob/main/examples/search_fiqa.sh for multi-GPU exact search)
retriever = DRAGONPlus()
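# Switch to evaluation mode so dropout is disabled during encoding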
retriever.eval()
queries = [Query(query_id=query["_id"], text=query["text"]) for query in queries]
passages = [
    Passage(passage_id=passage["_id"], text=passage["text"]) for passage in passages
]
query_embeddings = retriever.encode_queries(queries)
with torch.no_grad():  # Takes around a minute on a V100 GPU
    passage_embeddings, passage_mask = retriever.encode_passages(passages)

# Calculate the similarities and keep top-K:
similarity_scores = torch.matmul(
    query_embeddings, passage_embeddings.t()
)  # (query_num, passage_num)
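# Keep the 10 highest-scoring passages per query (k=10 matches the nDCG@10 cutoff used below)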
topk = torch.topk(similarity_scores, k=10)
topk_values: torch.Tensor = topk[0]
topk_indices: torch.LongTensor = topk[1]
topk_value_lists = topk_values.tolist()
topk_index_lists = topk_indices.tolist()

# Run evaluation with pytrec_eval:
retrieval_scores: Dict[str, Dict[str, float]] = {}
for query_i, (values, indices) in enumerate(zip(topk_value_lists, topk_index_lists)):
    query_id = queries[query_i].query_id
    retrieval_scores.setdefault(query_id, {})
    for value, passage_i in zip(values, indices):
        passage_id = passages[passage_i].passage_id
        retrieval_scores[query_id][passage_id] = value
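# pytrec_eval expects both the qrels and the run scores as nested dicts keyed by query ID and passage ID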
evaluator = pytrec_eval.RelevanceEvaluator(
    query_relevance=qrels, measures=["ndcg_cut_10"]
)
query_performances: Dict[str, Dict[str, float]] = evaluator.evaluate(retrieval_scores)
ndcg = np.mean([score["ndcg_cut_10"] for score in query_performances.values()])
print(ndcg)  # 0.21796083196880855
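
As an alternative to datasets.load_dataset, the parquet files can also be read with Dask (listed above as a supported library) through the Hugging Face filesystem. The snippet below is a minimal sketch rather than part of the original example: the hf:// glob pattern assumes the corpus split is stored as ConditionalQA-corpus/test-*.parquet, which may not match the actual repository layout.

# Sketch only: reading the corpus parquet files directly with Dask.
# Requires `pip install dask[dataframe] huggingface_hub`; the path pattern below
# is an assumption about the repository layout and may need adjusting.
import dask.dataframe as dd

corpus_ddf = dd.read_parquet(
    "hf://datasets/kwang2049/dapr/ConditionalQA-corpus/test-*.parquet"
)
print(corpus_ddf.head())  # Peek at the first few corpus rows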