# pip install pytrec-eval-terrier
import pytrec_eval
import json
# conda install sentence-transformers -c conda-forge
from sentence_transformers import SentenceTransformer
import pandas as pd
from collections import defaultdict
import torch
from tqdm import tqdm
from tqdm.autonotebook import trange
import random

# Pick the compute device once and reuse it everywhere (model + tensors).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def load_dataset(path):
    """Load a parquet file into a pandas DataFrame."""
    return pd.read_parquet(path, engine="pyarrow")


path = r'D:\datasets\H2Retrieval\data_sample5k'
qrels_pd = load_dataset(path + r'\qrels.parquet.gz')
corpus = load_dataset(path + r'\corpus.parquet.gz')
queries = load_dataset(path + r'\queries.parquet.gz')

# NOTE: the 5k sample in data_sample5k was generated once with the snippet
# below; it is kept (commented out) for reproducibility.
# sample_5k = sorted(random.sample(list(queries['qid'].values), k=5000))
# queries = queries[queries['qid'].isin(sample_5k)]
# qrels_pd = qrels_pd[qrels_pd['qid'].isin(sample_5k)]
# corpus = corpus[corpus['cid'].isin(qrels_pd['cid'])]
# for name, df in (('corpus', corpus), ('queries', queries), ('qrels', qrels_pd)):
#     df.to_parquet(
#         rf"D:\datasets\H2Retrieval\data_sample5k\{name}.parquet.gz",
#         engine="pyarrow",
#         compression="gzip",
#         index=False,
#     )

# pytrec_eval expects {qid: {cid: relevance}} with *integer* relevance.
# itertuples is much faster than iterrows and avoids per-row Series creation.
qrels = defaultdict(dict)
for row in qrels_pd.itertuples(index=False):
    qrels[row.qid][row.cid] = int(row.score)

# BUGFIX: device was hard-coded to 'cuda:0', which crashes on CPU-only hosts
# even though a CPU fallback is computed above.
model = SentenceTransformer(r'D:\models\Dmeta', device=str(device))

corpusEmbeds = model.encode(corpus['text'].values, normalize_embeddings=True,
                            show_progress_bar=True, batch_size=32)
queriesEmbeds = model.encode(queries['text'].values, normalize_embeddings=True,
                             show_progress_bar=True, batch_size=32)

# Embeddings are L2-normalised, so a plain dot product equals cosine similarity.
# corpusEmbeds is transposed to (dim, n_corpus) so query @ corpus scores all docs.
queriesEmbeds = torch.tensor(queriesEmbeds, device=device)
corpusEmbeds = torch.tensor(corpusEmbeds, device=device).T

# Positional lookup arrays: plain [] on a pandas Series is label-based and
# silently breaks if the DataFrames ever carry a non-RangeIndex (e.g. after
# the filtering shown in the commented-out sampling code above).
corpus_cids = corpus['cid'].values
query_ids = queries['qid'].values


def getTopK(corpusEmbeds, qEmbeds, k=10):
    """Return {cid: score} for the k corpus documents most similar to one query.

    corpusEmbeds: (dim, n_corpus) tensor (already transposed).
    qEmbeds:      (dim,) tensor for a single query.
    k:            number of documents to keep (clamped to the corpus size).
    """
    scores = qEmbeds @ corpusEmbeds
    # topk avoids the full argsort the original did (partial selection vs full
    # sort) and returns the scores directly, so no second indexing pass.
    k = min(k, scores.numel())
    top_scores, top_indices = torch.topk(scores, k)
    top_scores = top_scores.cpu()
    top_indices = top_indices.cpu()
    return {corpus_cids[int(idx)]: float(s)
            for idx, s in zip(top_indices, top_scores)}


results = {}
for i in tqdm(range(len(queries)), desc="Converting"):
    results[query_ids[i]] = getTopK(corpusEmbeds, queriesEmbeds[i])

# Only the top-10 documents are scored per query, so 'ndcg' over this run is
# effectively NDCG@10 (hence the label below).
evaluator = pytrec_eval.RelevanceEvaluator(qrels, {'ndcg'})
per_query = evaluator.evaluate(results)
ndcg = sum(m['ndcg'] for m in per_query.values()) / len(queries)
print(f'ndcg_10: {ndcg*100:.2f}%')