Datasets:

Modalities:
Tabular
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
Dask
dapr / retrieval.py
kwang2049's picture
readme: retrieval and evaluation
df23a7a
raw
history blame
No virus
3.01 kB
# Please install clddp with `pip install -U clddp`
from clddp.retriever import Retriever, RetrieverConfig, Pooling, SimilarityFunction
from clddp.dm import Separator
from typing import Dict
from clddp.dm import Query, Passage
import torch
import pytrec_eval
import numpy as np
from datasets import load_dataset
# Define the retriever (DRAGON+ from https://arxiv.org/abs/2302.07452)
class DRAGONPlus(Retriever):
    """DRAGON+ dense retriever: separate query/passage encoders, CLS pooling,
    dot-product similarity, 512-token limit on both sides."""

    def __init__(self) -> None:
        cfg = RetrieverConfig(
            query_model_name_or_path="facebook/dragon-plus-query-encoder",
            passage_model_name_or_path="facebook/dragon-plus-context-encoder",
            shared_encoder=False,  # asymmetric: query and passage towers differ
            sep=Separator.blank,
            pooling=Pooling.cls,
            similarity_function=SimilarityFunction.dot_product,
            query_max_length=512,
            passage_max_length=512,
        )
        super().__init__(cfg)
# Load data: the ConditionalQA corpus, queries, and relevance judgments.
passages = load_dataset("kwang2049/dapr", "ConditionalQA-corpus", split="test")
queries = load_dataset("kwang2049/dapr", "ConditionalQA-queries", split="test")
qrels_rows = load_dataset("kwang2049/dapr", "ConditionalQA-qrels", split="test")

# Pivot the flat qrels rows into pytrec_eval's nested mapping:
# {query_id: {passage_id: relevance}}.
qrels: Dict[str, Dict[str, float]] = {}
for row in qrels_rows:
    qrels.setdefault(row["query_id"], {})[row["corpus_id"]] = row["score"]
# Encode queries and passages: (refer to
# https://github.com/kwang2049/clddp/blob/main/examples/search_fiqa.sh
# for multi-GPU exact search)
retriever = DRAGONPlus()
retriever.eval()  # inference mode (e.g. disables dropout)
queries = [Query(query_id=query["_id"], text=query["text"]) for query in queries]
passages = [
    Passage(passage_id=passage["_id"], text=passage["text"]) for passage in passages
]
# FIX: the original wrapped only encode_passages in no_grad, so query encoding
# needlessly built an autograd graph. Disable gradients for BOTH passes; the
# produced embedding values are unchanged.
with torch.no_grad():  # Takes around a minute on a V100 GPU
    query_embeddings = retriever.encode_queries(queries)
    passage_embeddings, passage_mask = retriever.encode_passages(passages)
# Calculate the similarities and keep top-K:
# dot-product score matrix of shape (query_num, passage_num).
similarity_scores = torch.matmul(query_embeddings, passage_embeddings.t())
# For each query keep the 10 highest-scoring passages (values + column indices).
topk_values, topk_indices = torch.topk(similarity_scores, k=10)
# Move both tensors off-device into plain nested Python lists.
topk_value_lists = topk_values.tolist()
topk_index_lists = topk_indices.tolist()
# Run evaluation with pytrec_eval:
# Build the run in pytrec_eval's format: {query_id: {passage_id: score}}.
retrieval_scores: Dict[str, Dict[str, float]] = {}
for query, values, indices in zip(queries, topk_value_lists, topk_index_lists):
    retrieval_scores[query.query_id] = {
        passages[passage_i].passage_id: value
        for value, passage_i in zip(values, indices)
    }
evaluator = pytrec_eval.RelevanceEvaluator(
    query_relevance=qrels, measures=["ndcg_cut_10"]
)
# Per-query measure values; average nDCG@10 over all queries.
query_performances: Dict[str, Dict[str, float]] = evaluator.evaluate(retrieval_scores)
ndcg = np.mean([score["ndcg_cut_10"] for score in query_performances.values()])
print(ndcg)  # 0.21796083196880855