readme: retrieval and evaluation
- .gitignore +2 -1
- README.md +81 -0
- retrieval.py +74 -0
.gitignore
CHANGED
@@ -1 +1,2 @@
-./loading.py
+./loading.py
+./retrieval.py
README.md
CHANGED
@@ -244,5 +244,86 @@ for qrel in qrels:
     qrel["url"]  # url to the document in Wikipedia
 ```

+## Retrieval and Evaluation
+The following shows an example of how the dataset can be used to build a semantic search application.
+> This example is based on [clddp](https://github.com/kwang2049/clddp/tree/main) (`pip install -U clddp`). One can further explore this [example](https://github.com/kwang2049/clddp/blob/main/examples/search_fiqa.sh) for convenient multi-GPU exact search.
+
+```python
+# Please install clddp with `pip install -U clddp`
+from clddp.retriever import Retriever, RetrieverConfig, Pooling, SimilarityFunction
+from clddp.dm import Separator
+from typing import Dict
+from clddp.dm import Query, Passage
+import torch
+import pytrec_eval
+import numpy as np
+from datasets import load_dataset
+
+
+# Define the retriever (DRAGON+ from https://arxiv.org/abs/2302.07452)
+class DRAGONPlus(Retriever):
+    def __init__(self) -> None:
+        config = RetrieverConfig(
+            query_model_name_or_path="facebook/dragon-plus-query-encoder",
+            passage_model_name_or_path="facebook/dragon-plus-context-encoder",
+            shared_encoder=False,
+            sep=Separator.blank,
+            pooling=Pooling.cls,
+            similarity_function=SimilarityFunction.dot_product,
+            query_max_length=512,
+            passage_max_length=512,
+        )
+        super().__init__(config)
+
+
+# Load data:
+passages = load_dataset("kwang2049/dapr", "ConditionalQA-corpus", split="test")
+queries = load_dataset("kwang2049/dapr", "ConditionalQA-queries", split="test")
+qrels_rows = load_dataset("kwang2049/dapr", "ConditionalQA-qrels", split="test")
+qrels: Dict[str, Dict[str, float]] = {}
+for qrel_row in qrels_rows:
+    qid = qrel_row["query_id"]
+    pid = qrel_row["corpus_id"]
+    rel = qrel_row["score"]
+    qrels.setdefault(qid, {})
+    qrels[qid][pid] = rel
+
+# Encode queries and passages: (refer to https://github.com/kwang2049/clddp/blob/main/examples/search_fiqa.sh for multi-GPU exact search)
+retriever = DRAGONPlus()
+retriever.eval()
+queries = [Query(query_id=query["_id"], text=query["text"]) for query in queries]
+passages = [
+    Passage(passage_id=passage["_id"], text=passage["text"]) for passage in passages
+]
+query_embeddings = retriever.encode_queries(queries)
+with torch.no_grad():  # Takes around a minute on a V100 GPU
+    passage_embeddings, passage_mask = retriever.encode_passages(passages)
+
+# Calculate the similarities and keep top-K:
+similarity_scores = torch.matmul(
+    query_embeddings, passage_embeddings.t()
+)  # (query_num, passage_num)
+topk = torch.topk(similarity_scores, k=10)
+topk_values: torch.Tensor = topk[0]
+topk_indices: torch.LongTensor = topk[1]
+topk_value_lists = topk_values.tolist()
+topk_index_lists = topk_indices.tolist()
+
+# Run evaluation with pytrec_eval:
+retrieval_scores: Dict[str, Dict[str, float]] = {}
+for query_i, (values, indices) in enumerate(zip(topk_value_lists, topk_index_lists)):
+    query_id = queries[query_i].query_id
+    retrieval_scores.setdefault(query_id, {})
+    for value, passage_i in zip(values, indices):
+        passage_id = passages[passage_i].passage_id
+        retrieval_scores[query_id][passage_id] = value
+evaluator = pytrec_eval.RelevanceEvaluator(
+    query_relevance=qrels, measures=["ndcg_cut_10"]
+)
+query_performances: Dict[str, Dict[str, float]] = evaluator.evaluate(retrieval_scores)
+ndcg = np.mean([score["ndcg_cut_10"] for score in query_performances.values()])
+print(ndcg)  # 0.21796083196880855
+```
+
 ## Note
 This dataset was created with `datasets==2.15.0`. Make sure to use this or a newer version of the datasets library.
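A note on the evaluation step above: the added snippet reports only nDCG@10. The following is a minimal, self-contained sketch (toy query/passage ids and scores, not taken from DAPR; the `toy_qrels`/`toy_run` names are illustrative only) of how the same pytrec_eval qrels/run dictionaries can report several measures at once.

```python
import pytrec_eval

# query id -> {passage id: graded relevance} (pytrec_eval expects integer labels)
toy_qrels = {
    "q1": {"p1": 2, "p3": 1},
    "q2": {"p2": 1},
}
# query id -> {passage id: retrieval score} (higher means more relevant)
toy_run = {
    "q1": {"p1": 12.3, "p2": 9.1, "p3": 8.7},
    "q2": {"p3": 10.0, "p2": 7.5},
}

# "ndcg_cut" expands to nDCG at the standard cut-offs (ndcg_cut_10 among them);
# "map" and "recip_rank" are single-valued measures.
evaluator = pytrec_eval.RelevanceEvaluator(toy_qrels, {"ndcg_cut", "map", "recip_rank"})
per_query = evaluator.evaluate(toy_run)
for measure in ("ndcg_cut_10", "map", "recip_rank"):
    mean = sum(scores[measure] for scores in per_query.values()) / len(per_query)
    print(f"{measure}: {mean:.4f}")
```

This mirrors the `evaluator.evaluate(retrieval_scores)` call in the commit; only the measure set differs.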
retrieval.py
ADDED
@@ -0,0 +1,74 @@
+# Please install clddp with `pip install -U clddp`
+from clddp.retriever import Retriever, RetrieverConfig, Pooling, SimilarityFunction
+from clddp.dm import Separator
+from typing import Dict
+from clddp.dm import Query, Passage
+import torch
+import pytrec_eval
+import numpy as np
+from datasets import load_dataset
+
+
+# Define the retriever (DRAGON+ from https://arxiv.org/abs/2302.07452)
+class DRAGONPlus(Retriever):
+    def __init__(self) -> None:
+        config = RetrieverConfig(
+            query_model_name_or_path="facebook/dragon-plus-query-encoder",
+            passage_model_name_or_path="facebook/dragon-plus-context-encoder",
+            shared_encoder=False,
+            sep=Separator.blank,
+            pooling=Pooling.cls,
+            similarity_function=SimilarityFunction.dot_product,
+            query_max_length=512,
+            passage_max_length=512,
+        )
+        super().__init__(config)
+
+
+# Load data:
+passages = load_dataset("kwang2049/dapr", "ConditionalQA-corpus", split="test")
+queries = load_dataset("kwang2049/dapr", "ConditionalQA-queries", split="test")
+qrels_rows = load_dataset("kwang2049/dapr", "ConditionalQA-qrels", split="test")
+qrels: Dict[str, Dict[str, float]] = {}
+for qrel_row in qrels_rows:
+    qid = qrel_row["query_id"]
+    pid = qrel_row["corpus_id"]
+    rel = qrel_row["score"]
+    qrels.setdefault(qid, {})
+    qrels[qid][pid] = rel
+
+# Encode queries and passages: (refer to https://github.com/kwang2049/clddp/blob/main/examples/search_fiqa.sh for multi-GPU exact search)
+retriever = DRAGONPlus()
+retriever.eval()
+queries = [Query(query_id=query["_id"], text=query["text"]) for query in queries]
+passages = [
+    Passage(passage_id=passage["_id"], text=passage["text"]) for passage in passages
+]
+query_embeddings = retriever.encode_queries(queries)
+with torch.no_grad():  # Takes around a minute on a V100 GPU
+    passage_embeddings, passage_mask = retriever.encode_passages(passages)
+
+# Calculate the similarities and keep top-K:
+similarity_scores = torch.matmul(
+    query_embeddings, passage_embeddings.t()
+)  # (query_num, passage_num)
+topk = torch.topk(similarity_scores, k=10)
+topk_values: torch.Tensor = topk[0]
+topk_indices: torch.LongTensor = topk[1]
+topk_value_lists = topk_values.tolist()
+topk_index_lists = topk_indices.tolist()
+
+# Run evaluation with pytrec_eval:
+retrieval_scores: Dict[str, Dict[str, float]] = {}
+for query_i, (values, indices) in enumerate(zip(topk_value_lists, topk_index_lists)):
+    query_id = queries[query_i].query_id
+    retrieval_scores.setdefault(query_id, {})
+    for value, passage_i in zip(values, indices):
+        passage_id = passages[passage_i].passage_id
+        retrieval_scores[query_id][passage_id] = value
+evaluator = pytrec_eval.RelevanceEvaluator(
+    query_relevance=qrels, measures=["ndcg_cut_10"]
+)
+query_performances: Dict[str, Dict[str, float]] = evaluator.evaluate(retrieval_scores)
+ndcg = np.mean([score["ndcg_cut_10"] for score in query_performances.values()])
+print(ndcg)  # 0.21796083196880855
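Since `retrieval.py` duplicates the README snippet, it can be run directly (e.g. `python retrieval.py`) once `clddp`, `pytrec_eval`, and a recent `datasets` release are installed. The guard below is a hypothetical addition, not part of the commit, that fails early when the installed `datasets` version predates the 2.15.0 requirement stated in the README's Note.

```python
# Hypothetical pre-flight check (not in the commit): the README's Note requires
# datasets >= 2.15.0, so abort early if an older release is installed.
import datasets
from packaging.version import Version  # packaging is already a dependency of datasets

assert Version(datasets.__version__) >= Version("2.15.0"), (
    f"Found datasets {datasets.__version__}; please run `pip install -U datasets`."
)
```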