File size: 2,666 Bytes
5b4ee73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
# pip install pytrec-eval-terrier
import pytrec_eval
import json
# conda install sentence-transformers -c conda-forge
from sentence_transformers import SentenceTransformer
import pandas as pd
from collections import defaultdict
import torch
from tqdm import tqdm
from tqdm.autonotebook import trange
import random


# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def load_dataset(path):
    """Read a gzip-compressed parquet file into a DataFrame via pyarrow."""
    return pd.read_parquet(path, engine="pyarrow")

# Root of the 5k-sample H2Retrieval dataset (hard-coded local Windows path).
path = r'D:\datasets\H2Retrieval\data_sample5k'
# qrels: relevance judgments (qid, cid, score); corpus: documents (cid, text);
# queries: search queries (qid, text).
qrels_pd = load_dataset(path + r'\qrels.parquet.gz')
corpus = load_dataset(path + r'\corpus.parquet.gz')
queries = load_dataset(path + r'\queries.parquet.gz')

# One-off preprocessing that produced the 5k sample loaded above: draws 5000
# query ids, keeps only their qrels and the judged documents, and writes the
# reduced parquet files. Kept for provenance — do not re-run against the
# already-sampled files.
# sample_5k = sorted(random.sample(list(queries['qid'].values), k=5000))
# queries = queries[queries['qid'].isin(sample_5k)]
# qrels_pd = qrels_pd[qrels_pd['qid'].isin(sample_5k)]
# corpus = corpus[corpus['cid'].isin(qrels_pd['cid'])]
# corpus.to_parquet(
#     r"D:\datasets\H2Retrieval\data_sample5k\corpus.parquet.gz",
#     engine="pyarrow",
#     compression="gzip",
#     index=False
# )
# queries.to_parquet(
#     r"D:\datasets\H2Retrieval\data_sample5k\queries.parquet.gz",
#     engine="pyarrow",
#     compression="gzip",
#     index=False
# )
# qrels_pd.to_parquet(
#     r"D:\datasets\H2Retrieval\data_sample5k\qrels.parquet.gz",
#     engine="pyarrow",
#     compression="gzip",
#     index=False
# )

# Re-shape qrels into pytrec_eval's nested-dict format: {qid: {cid: relevance}}.
# Iterating the three columns with zip avoids the per-row Series construction
# overhead of DataFrame.iterrows on large judgment files.
qrels = defaultdict(dict)
for qid, cid, score in zip(qrels_pd['qid'], qrels_pd['cid'], qrels_pd['score']):
    qrels[qid][cid] = score

# Load the embedding model on the device selected above; the original
# hard-coded 'cuda:0' and crashed on CPU-only machines even though a
# CPU fallback had been computed.
model = SentenceTransformer(r'D:\models\Dmeta', device=str(device))

# Unit-normalized embeddings so the dot product below equals cosine similarity.
corpusEmbeds = model.encode(corpus['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
queriesEmbeds = model.encode(queries['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)

queriesEmbeds = torch.tensor(queriesEmbeds, device=device)
# Transpose corpus embeddings to (dim, n_docs) so `query @ corpus` yields one
# score per document.
corpusEmbeds = torch.tensor(corpusEmbeds.T, device=device)

def getTopK(corpusEmbeds, qEmbeds, k=10, cids=None):
    """Return the top-k documents for one query as an ordered {cid: score} dict.

    corpusEmbeds: (dim, n_docs) tensor of (already transposed) doc embeddings.
    qEmbeds: (dim,) tensor for a single query.
    k: number of documents to return (clamped to the corpus size).
    cids: document ids aligned with corpusEmbeds columns; defaults to the
          module-level corpus['cid'].
    """
    if cids is None:
        # NOTE(review): positional alignment assumes `corpus` still has its
        # default RangeIndex (it was loaded unfiltered) — confirm if filtering
        # is ever re-enabled.
        cids = corpus['cid'].values
    scores = qEmbeds @ corpusEmbeds
    # topk selects and sorts only k entries instead of argsort-ing the whole
    # score vector, and only the k winners are moved off the device.
    k = min(k, scores.shape[-1])
    top_scores, top_idx = torch.topk(scores, k)
    return {
        cids[i]: float(s)
        for i, s in zip(top_idx.cpu().tolist(), top_scores.cpu().tolist())
    }

# Retrieve the top-10 documents for every query. Zipping qids with the
# embedding rows keeps them aligned by position; the original
# `queries['qid'][i]` was a label lookup that would silently misalign (or
# raise) if `queries` ever carried a non-default index, e.g. after the
# commented-out sampling/filtering step.
results = {}
for qid, qEmb in tqdm(zip(queries['qid'].values, queriesEmbeds),
                      total=len(queries), desc="Converting"):
    results[qid] = getTopK(corpusEmbeds, qEmb)

# Score with nDCG truncated at depth 10 ('ndcg_cut.10') so the measure matches
# both the top-10 retrieval depth and the printed label. The original used
# plain 'ndcg', which normalizes against the full ideal ranking and is not
# nDCG@10.
evaluator = pytrec_eval.RelevanceEvaluator(qrels, {'ndcg_cut.10'})
per_query = evaluator.evaluate(results)
# Macro-average over the queries pytrec_eval actually evaluated (queries
# missing from qrels are excluded rather than counted as zero).
ndcg = sum(m['ndcg_cut_10'] for m in per_query.values()) / len(per_query)

print(f'ndcg_10: {ndcg*100:.2f}%')