Limour committed on
Commit 5b4ee73
1 Parent(s): 33261bd

Upload 4 files

H2Retrieval_Dmeta_fix.py ADDED
@@ -0,0 +1,86 @@
+ # pip install pytrec-eval-terrier
+ import pytrec_eval
+ import json
+ # conda install sentence-transformers -c conda-forge
+ from sentence_transformers import SentenceTransformer
+ import pandas as pd
+ from collections import defaultdict
+ import torch
+ from tqdm import tqdm
+ from tqdm.autonotebook import trange
+ import random
+
+
+ if torch.cuda.is_available():
+     device = torch.device('cuda')
+ else:
+     device = torch.device('cpu')
+
+ def load_dataset(path):
+     df = pd.read_parquet(path, engine="pyarrow")
+     return df
+
+ path = r'D:\datasets\H2Retrieval\data_sample5k'
+ qrels_pd = load_dataset(path + r'\qrels.parquet.gz')
+ corpus = load_dataset(path + r'\corpus.parquet.gz')
+ queries = load_dataset(path + r'\queries.parquet.gz')
+
+ # sample_5k = sorted(random.sample(list(queries['qid'].values), k=5000))
+ # queries = queries[queries['qid'].isin(sample_5k)]
+ # qrels_pd = qrels_pd[qrels_pd['qid'].isin(sample_5k)]
+ # corpus = corpus[corpus['cid'].isin(qrels_pd['cid'])]
+ # corpus.to_parquet(
+ #     r"D:\datasets\H2Retrieval\data_sample5k\corpus.parquet.gz",
+ #     engine="pyarrow",
+ #     compression="gzip",
+ #     index=False
+ # )
+ # queries.to_parquet(
+ #     r"D:\datasets\H2Retrieval\data_sample5k\queries.parquet.gz",
+ #     engine="pyarrow",
+ #     compression="gzip",
+ #     index=False
+ # )
+ # qrels_pd.to_parquet(
+ #     r"D:\datasets\H2Retrieval\data_sample5k\qrels.parquet.gz",
+ #     engine="pyarrow",
+ #     compression="gzip",
+ #     index=False
+ # )
+
+ qrels = defaultdict(dict)
+ for i, e in qrels_pd.iterrows():
+     qrels[e['qid']][e['cid']] = e['score']
+
+ model = SentenceTransformer(r'D:\models\Dmeta', device='cuda:0')
+
+ corpusEmbeds = model.encode(corpus['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
+ queriesEmbeds = model.encode(queries['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
+
+ queriesEmbeds = torch.tensor(queriesEmbeds, device=device)
+ corpusEmbeds = corpusEmbeds.T
+ corpusEmbeds = torch.tensor(corpusEmbeds, device=device)
+
+ def getTopK(corpusEmbeds, qEmbeds, k=10):
+     scores = qEmbeds @ corpusEmbeds
+     top_k_indices = torch.argsort(scores, descending=True)[:k]
+     scores = scores.cpu()
+     top_k_indices = top_k_indices.cpu()
+     retn = {}
+     for x in top_k_indices:
+         x = int(x)
+         retn[corpus['cid'][x]] = float(scores[x])
+     return retn
+
+ results = {}
+ for i in tqdm(range(len(queries)), desc="Converting"):
+     results[queries['qid'][i]] = getTopK(corpusEmbeds, queriesEmbeds[i])
+
+ evaluator = pytrec_eval.RelevanceEvaluator(qrels, {'ndcg'})
+ tmp = evaluator.evaluate(results)
+ ndcg = 0
+ for x in tmp.values():
+     ndcg += x['ndcg']
+ ndcg /= len(queries)
+
+ print(f'ndcg_10: {ndcg*100:.2f}%')
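
Note on the metric (not part of the uploaded file): the script requests pytrec_eval's plain 'ndcg' measure, which is computed over however many documents the run contains (10 per query here), and then prints the average as ndcg_10. pytrec_eval also supports the 'ndcg_cut' measure family, which reports fixed cutoffs such as ndcg_cut_10 directly. A minimal sketch, assuming the qrels and results dictionaries built above (with string query and document ids):

# Sketch only, not from the commit: explicit NDCG@10 via the 'ndcg_cut' measures.
evaluator10 = pytrec_eval.RelevanceEvaluator(qrels, {'ndcg_cut'})
per_query = evaluator10.evaluate(results)  # {qid: {'ndcg_cut_5': ..., 'ndcg_cut_10': ..., ...}}
ndcg10 = sum(q['ndcg_cut_10'] for q in per_query.values()) / len(per_query)
print(f'ndcg_cut_10: {ndcg10*100:.2f}%')

On the retrieval side, torch.topk(scores, k) would return the same top-k indices as the full argsort without sorting every corpus score; both approaches produce identical rankings here.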
data_sample5k/corpus.parquet.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64b4e28cc115b2d25700b67568e4d8bf17ebdc97f443e08637f1b493502de340
+ size 2384921
data_sample5k/qrels.parquet.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33df50dea5812644cebea79d9d5e714f2c7d331b726c835444e421c952c766ca
+ size 115556
data_sample5k/queries.parquet.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afcab206259b5e918a8889f6b4f0e8eb3c4ffbad139d7b6b147695fe25349817
+ size 643048