# conda install sentence-transformers -c conda-forge
from sentence_transformers import SentenceTransformer
import pandas as pd
from collections import defaultdict
import torch
from tqdm import tqdm
from test_pytrec_eval import ndcg_in_all

if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')


def load_dataset(path):
    df = pd.read_parquet(path, engine="pyarrow")
    return df


def load_all_dataset(path, convert=False):
    qrels_pd = load_dataset(path + r'\qrels.parquet')
    corpus = load_dataset(path + r'\corpus.parquet')
    queries = load_dataset(path + r'\queries.parquet')
    if convert:
        # Convert the qrels DataFrame into a nested dict: {qid: {cid: relevance score}}.
        qrels = defaultdict(dict)
        for i, e in tqdm(qrels_pd.iterrows(), total=len(qrels_pd), desc="load_all_dataset: Converting"):
            qrels[e['qid']][e['cid']] = e['score']
    else:
        qrels = qrels_pd
    return corpus, queries, qrels
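
# Alternative sketch (not called anywhere below): the row-by-row iterrows loop above
# can be replaced with a pandas groupby. Assumes the qrels parquet has exactly the
# 'qid', 'cid' and 'score' columns used above; this is an optional optimization, not
# part of the original pipeline.
def convert_qrels_vectorized(qrels_pd):
    # Build {qid: {cid: score}} one query group at a time instead of one row at a time.
    return {
        qid: dict(zip(group['cid'], group['score']))
        for qid, group in qrels_pd.groupby('qid')
    }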


corpus, queries, qrels = load_all_dataset(r'D:\datasets\G2Retrieval\data_sample2k')


randEmbed = False
if randEmbed:
    # Random baseline: 1-dimensional embeddings, laid out as (dim, n_corpus) and
    # (n_queries, dim) to match the matmul in getTopK below.
    corpusEmbeds = torch.rand((1, len(corpus)), device=device)
    queriesEmbeds = torch.rand((len(queries), 1), device=device)
else:
    with torch.no_grad():
        path = r'D:\models\bce'
        model = SentenceTransformer(path, device=device)

        # encode() returns numpy arrays of shape (n, dim); embeddings are L2-normalized
        # so that the dot product below equals cosine similarity.
        corpusEmbeds = model.encode(corpus['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
        queriesEmbeds = model.encode(queries['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)

        queriesEmbeds = torch.tensor(queriesEmbeds, device=device)
        # Transpose the corpus matrix to (dim, n_corpus) so a single query vector of
        # shape (dim,) can be scored against every document in one matmul.
        corpusEmbeds = torch.tensor(corpusEmbeds.T, device=device)
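
# Optional sketch: sentence-transformers can return torch tensors directly via
# convert_to_tensor=True, which skips the numpy -> torch conversion above. The helper
# below is illustrative only and is not called in this script.
def encode_as_tensor(model, texts, batch_size=32):
    # Returns an (n, dim) L2-normalized float tensor on the model's device.
    return model.encode(
        texts,
        normalize_embeddings=True,
        convert_to_tensor=True,
        show_progress_bar=True,
        batch_size=batch_size,
    )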


@torch.no_grad()
def getTopK(corpusEmbeds, qEmbeds, qid, k=200):
    # Cosine similarity of one normalized query against every corpus embedding:
    # (dim,) @ (dim, n_corpus) -> (n_corpus,).
    scores = qEmbeds @ corpusEmbeds
    top_k_indices = torch.argsort(scores, descending=True)[:k]
    scores = scores.cpu()
    top_k_indices = top_k_indices.cpu()
    retn = []
    for x in top_k_indices:
        x = int(x)
        # Positional lookup of the corpus id for this ranked hit.
        retn.append((qid, corpus['cid'].iloc[x], float(scores[x])))
    return retn
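
# Alternative sketch (unused): torch.topk avoids sorting the full score vector when
# only the top k hits are needed; functionally equivalent to the argsort above.
@torch.no_grad()
def getTopK_via_topk(corpusEmbeds, qEmbeds, qid, k=200):
    scores = qEmbeds @ corpusEmbeds
    top_scores, top_indices = torch.topk(scores, k=min(k, scores.shape[0]))
    return [
        (qid, corpus['cid'].iloc[int(x)], float(s))
        for x, s in zip(top_indices.cpu(), top_scores.cpu())
    ]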

def print_ndcgs(k):
    with torch.no_grad():
        results = []
        # Retrieve the top-k corpus documents for every query.
        for i in tqdm(range(len(queries)), desc="Retrieving"):
            results.extend(getTopK(corpusEmbeds, queriesEmbeds[i], queries['qid'].iloc[i], k=k))

    results = pd.DataFrame(results, columns=['qid', 'cid', 'score'])
    results['score'] = results['score'].astype(float)
    # ndcg_in_all (local helper) yields per-query NDCG scores; report their mean and std.
    tmp = ndcg_in_all(qrels, results)
    ndcgs = torch.tensor(list(tmp.values()), device=device)

    mean = torch.mean(ndcgs)
    std = torch.std(ndcgs)

    print(f'NDCG@{k}: {mean*100:.2f}±{std*100:.2f}')

print_ndcgs(3)
print_ndcgs(10)
print_ndcgs(50)
print_ndcgs(100)
print_ndcgs(200)
# # Manually release cached CUDA memory
# del queriesEmbeds
# del corpusEmbeds
# del model
# torch.cuda.empty_cache()
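
# Sketch of what the local test_pytrec_eval.ndcg_in_all helper presumably wraps, using
# pytrec_eval directly. This is an assumption for illustration only: it expects qrels in
# the nested-dict form produced by load_all_dataset(convert=True) and a results DataFrame
# like the one built in print_ndcgs, and it is not called anywhere in this script.
def ndcg_at_10_sketch(qrels_dict, results_df):
    import pytrec_eval
    # pytrec_eval expects string ids and a {qid: {cid: score}} run dict.
    run = defaultdict(dict)
    for _, row in results_df.iterrows():
        run[str(row['qid'])][str(row['cid'])] = float(row['score'])
    qrel = {str(q): {str(c): int(s) for c, s in docs.items()} for q, docs in qrels_dict.items()}
    evaluator = pytrec_eval.RelevanceEvaluator(qrel, {'ndcg_cut'})
    per_query = evaluator.evaluate(run)
    return {qid: metrics['ndcg_cut_10'] for qid, metrics in per_query.items()}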