# H2Retrieval/new/H2Retrieval_bce.py
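# Evaluates an embedding model (bce, loaded via sentence-transformers) on an H2Retrieval
# data sample: queries and documents are embedded, scored by dot product over normalized
# embeddings, and NDCG@k is reported. Set randEmbed = True below to run a random-score baseline.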
# conda install sentence-transformers -c conda-forge
from sentence_transformers import SentenceTransformer
import pandas as pd
from collections import defaultdict
import torch
from tqdm import tqdm
from new.test_pytrec_eval import ndcg_in_all
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
def load_dataset(path):
    df = pd.read_parquet(path, engine="pyarrow")
    return df
def load_all_dataset(path, convert=False):
    qrels_pd = load_dataset(path + r'\qrels.parquet')
    corpus = load_dataset(path + r'\corpus.parquet')
    queries = load_dataset(path + r'\queries.parquet')
    if convert:
        qrels = defaultdict(dict)
        for i, e in tqdm(qrels_pd.iterrows(), desc="load_all_dataset: Converting"):
            qrels[e['qid']][e['cid']] = e['score']
    else:
        qrels = qrels_pd
    return corpus, queries, qrels
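# Expected parquet layout (inferred from the columns used in this script):
#   qrels.parquet   -> columns qid, cid, score
#   corpus.parquet  -> columns cid, text
#   queries.parquet -> columns qid, text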
corpus, queries, qrels = load_all_dataset(r'D:\datasets\H2Retrieval\new\data_sample1k')
randEmbed = True  # True: random embeddings as a sanity-check baseline; False: encode with the bce model
if randEmbed:
    corpusEmbeds = torch.rand((1, len(corpus)))
    queriesEmbeds = torch.rand((len(queries), 1))
else:
    with torch.no_grad():
        path = r'D:\models\bce'
        model = SentenceTransformer(path, device='cuda:0')
        corpusEmbeds = model.encode(corpus['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
        queriesEmbeds = model.encode(queries['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
        queriesEmbeds = torch.tensor(queriesEmbeds, device=device)
        corpusEmbeds = corpusEmbeds.T  # transpose so a query vector @ corpusEmbeds scores every document
        corpusEmbeds = torch.tensor(corpusEmbeds, device=device)
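# At this point corpusEmbeds has shape (dim, num_docs) and queriesEmbeds has shape
# (num_queries, dim), so queriesEmbeds[i] @ corpusEmbeds yields one score per document
# (a cosine similarity in the model branch, since normalize_embeddings=True).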
@torch.no_grad()
def getTopK(corpusEmbeds, qEmbeds, qid, k=200):
    scores = qEmbeds @ corpusEmbeds
    top_k_indices = torch.argsort(scores, descending=True)[:k]
    scores = scores.cpu()
    top_k_indices = top_k_indices.cpu()
    retn = []
    for x in top_k_indices:
        x = int(x)
        retn.append((qid, corpus['cid'][x], float(scores[x])))
    return retn
def print_ndcgs(k):
    with torch.no_grad():
        results = []
        for i in tqdm(range(len(queries)), desc="Retrieving top-k"):
            results.extend(getTopK(corpusEmbeds, queriesEmbeds[i], queries['qid'][i], k=k))
        results = pd.DataFrame(results, columns=['qid', 'cid', 'score'])
        results['score'] = results['score'].astype(float)
        tmp = ndcg_in_all(qrels, results)
        ndcgs = torch.tensor([x for x in tmp.values()], device=device)
        mean = torch.mean(ndcgs)
        std = torch.std(ndcgs)
        print(f'NDCG@{k}: {mean*100:.2f}±{std*100:.2f}')
print_ndcgs(5)
print_ndcgs(10)
print_ndcgs(15)
print_ndcgs(20)
print_ndcgs(30)
# # Manually release cached CUDA memory
# del queriesEmbeds
# del corpusEmbeds
# del model
# torch.cuda.empty_cache()