Upload 9 files
Browse files- new/H2Retrieval_bce.py +90 -0
- new/data/corpus.parquet +3 -0
- new/data/qrels.parquet +3 -0
- new/data/queries.parquet +3 -0
- new/data_sample1k/corpus.parquet +3 -0
- new/data_sample1k/qrels.parquet +3 -0
- new/data_sample1k/queries.parquet +3 -0
- new/main.py +173 -0
- new/test_pytrec_eval.py +72 -0
new/H2Retrieval_bce.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# conda install sentence-transformers -c conda-forge
|
2 |
+
from sentence_transformers import SentenceTransformer
|
3 |
+
import pandas as pd
|
4 |
+
from collections import defaultdict
|
5 |
+
import torch
|
6 |
+
from tqdm import tqdm
|
7 |
+
from new.test_pytrec_eval import ndcg_in_all
|
8 |
+
|
9 |
+
# Pick the compute device once at import time: GPU when available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
13 |
+
|
14 |
+
|
15 |
+
def load_dataset(path):
    """Read a single parquet file into a DataFrame (pyarrow engine)."""
    return pd.read_parquet(path, engine="pyarrow")
|
18 |
+
|
19 |
+
|
20 |
+
def load_all_dataset(path, convert=False):
    """Load the corpus/queries/qrels parquet triple stored under *path*.

    path: directory containing corpus.parquet, queries.parquet, qrels.parquet.
    convert: when True, reshape qrels into {qid: {cid: score}} for fast lookup;
             otherwise the raw qrels DataFrame is returned.
    Returns (corpus, queries, qrels).
    """
    import os  # local import keeps this fix self-contained in this file

    # os.path.join instead of `path + r'\...'`: identical on Windows, and no
    # longer broken on POSIX systems.
    qrels_pd = load_dataset(os.path.join(path, 'qrels.parquet'))
    corpus = load_dataset(os.path.join(path, 'corpus.parquet'))
    queries = load_dataset(os.path.join(path, 'queries.parquet'))
    if convert:
        qrels = defaultdict(dict)
        for i, e in tqdm(qrels_pd.iterrows(), desc="load_all_dataset: Converting"):
            qrels[e['qid']][e['cid']] = e['score']
    else:
        qrels = qrels_pd
    return corpus, queries, qrels
|
31 |
+
|
32 |
+
|
33 |
+
corpus, queries, qrels = load_all_dataset(r'D:\datasets\H2Retrieval\new\data_sample1k')


# Toggle: random 1-dim embeddings (cheap smoke test of the pipeline) vs. real
# embeddings from the BCE sentence-transformer model.
randEmbed = True
if randEmbed:
    # These are created directly in their final layout: corpus as
    # (dim, n_corpus) and queries as (n_queries, dim) with dim == 1, so
    # `queriesEmbeds[i] @ corpusEmbeds` works per query.  The original code
    # also transposed corpusEmbeds on this path, turning (1, N) into (N, 1)
    # and breaking the matmul in getTopK — fixed by transposing only the
    # model-encoded (n, dim) arrays below.
    corpusEmbeds = torch.rand((1, len(corpus)), device=device)
    queriesEmbeds = torch.rand((len(queries), 1), device=device)
else:
    with torch.no_grad():
        path = r'D:\models\bce'
        model = SentenceTransformer(path, device='cuda:0')

        # encode() returns numpy arrays of shape (n, dim), L2-normalized so
        # dot product == cosine similarity.
        corpusEmbeds = model.encode(corpus['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)
        queriesEmbeds = model.encode(queries['text'].values, normalize_embeddings=True, show_progress_bar=True, batch_size=32)

    queriesEmbeds = torch.tensor(queriesEmbeds, device=device)
    # (n_corpus, dim) -> (dim, n_corpus) so a query vector @ corpusEmbeds
    # yields one score per corpus document.
    corpusEmbeds = torch.tensor(corpusEmbeds.T, device=device)
|
51 |
+
|
52 |
+
|
53 |
+
@torch.no_grad()
def getTopK(corpusEmbeds, qEmbeds, qid, k=200):
    """Score one query against all corpus embeddings and return the top k.

    corpusEmbeds: (dim, n_corpus) tensor; qEmbeds: (dim,) tensor for one query.
    qid: the query id echoed into every result row.
    Returns a list of (qid, cid, score) tuples, best first.

    NOTE: reads the module-level `corpus` DataFrame to map row index -> cid.
    """
    scores = qEmbeds @ corpusEmbeds
    k = min(k, scores.shape[-1])  # torch.topk requires k <= n (argsort[:k] didn't)
    # topk is O(n log k) and only moves k values to the CPU, instead of a full
    # argsort plus transferring the whole score vector.
    top_scores, top_idx = torch.topk(scores, k)
    top_scores = top_scores.cpu().tolist()
    top_idx = top_idx.cpu().tolist()
    return [(qid, corpus['cid'][i], float(s)) for i, s in zip(top_idx, top_scores)]
|
64 |
+
|
65 |
+
def print_ndcgs(k):
    """Run top-k retrieval for every query and print mean±std NDCG@k.

    Uses the module-level queries/qrels/corpusEmbeds/queriesEmbeds globals.
    """
    with torch.no_grad():
        runs = []
        for i in tqdm(range(len(queries)), desc="Converting"):
            runs.extend(getTopK(corpusEmbeds, queriesEmbeds[i], queries['qid'][i], k=k))

        run_df = pd.DataFrame(runs, columns=['qid', 'cid', 'score'])
        run_df['score'] = run_df['score'].astype(float)
        per_query = ndcg_in_all(qrels, run_df)
        ndcgs = torch.tensor(list(per_query.values()), device=device)

        mean = torch.mean(ndcgs)
        std = torch.std(ndcgs)

        print(f'NDCG@{k}: {mean*100:.2f}±{std*100:.2f}')
|
80 |
+
|
81 |
+
# Report retrieval quality at several cutoffs.
print_ndcgs(5)
print_ndcgs(10)
print_ndcgs(15)
print_ndcgs(20)
print_ndcgs(30)
# # Manually release cached CUDA memory
# del queriesEmbeds
# del corpusEmbeds
# del model
# torch.cuda.empty_cache()
|
new/data/corpus.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d67866969f5e82dc3b85fe0ed9c43ea96253b9c89463c1cf643e2cdfb6420f88
|
3 |
+
size 42572321
|
new/data/qrels.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:35fb7a5c36f5b00cf35ad083900d2a9ccc9ef0eb38ef22b2adc9743a658bf8ad
|
3 |
+
size 417621
|
new/data/queries.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b0c22d59d9d620ae7f6b5d9f12202d8e7dc8965f92e2769ac46cf561e9961deb
|
3 |
+
size 2898062
|
new/data_sample1k/corpus.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:953ac409e644e67ac0776b26deed2b4f180bdeac4cccc05fa91d4eb62e529287
|
3 |
+
size 8605887
|
new/data_sample1k/qrels.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cdda30f96e6b7173affc1071e4e7a12c5cac917d97e44acc8b0451c5e432b99d
|
3 |
+
size 78964
|
new/data_sample1k/queries.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4b188665a79683c00ea9a072aa4ef91f0a3939cf971a9a5af41f75bd64c508e8
|
3 |
+
size 584130
|
new/main.py
ADDED
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import os
|
3 |
+
import gzip
|
4 |
+
import random
|
5 |
+
import re
|
6 |
+
from tqdm import tqdm
|
7 |
+
from collections import defaultdict
|
8 |
+
|
9 |
+
|
10 |
+
def get_all_files_in_directory(directory, ext=''):
    """Recursively collect file paths under *directory* ending with *ext*.

    Returned paths are relative to *directory*.
    """
    matches = []
    for root, _dirs, files in os.walk(directory):
        rel = root[len(directory):]
        # Drop the single leading separator left by the prefix strip.
        if rel[:1] in ('\\', '/'):
            rel = rel[1:]
        matches.extend(os.path.join(rel, name) for name in files if name.endswith(ext))
    return matches
|
21 |
+
|
22 |
+
# Quote characters (ASCII and CJK) stripped from every line before chunking.
reg_q = re.compile(r'''['"“”‘’「」『』]''')
# Sentence-ending punctuation (ASCII and full-width variants).
reg_e = re.compile(r'''[?!。?!]''')


def readOne(filePath):
    """Read one UTF-8 text file (gzip-compressed if it ends in .gz) and split
    it into passages of roughly 384 characters, preferring to cut right after
    sentence-ending punctuation.

    Returns a list of stripped passage strings.
    """
    with gzip.open(filePath, 'rt', encoding='utf-8') if filePath.endswith('.gz') else open(filePath,
                                                                                           encoding='utf-8') as f:
        retn = []   # finished passages
        cache = ''  # text accumulated until the next passage boundary
        for line in f:
            line = reg_q.sub('', line)  # remove quote characters
            if len(cache) + len(line) < 384:
                # Still below the passage size: keep accumulating.
                cache += line
                continue
            if not bool(reg_e.findall(line)):
                # Size exceeded but no sentence-ending punctuation anywhere in
                # this line: flush everything as one passage.
                cache += line
                retn.append(cache.strip())
                cache = ''
                continue
            # Scan the line; line[s:i] is the slice not yet moved into cache.
            i = 1
            s = 0
            while i <= len(line):
                if len(cache) + (i - s) < 384:  # cut a passage every ~384 chars
                    # Jump i forward so cache reaches exactly 384 characters.
                    i = (384 - len(cache)) + s
                    if i > len(line):
                        break
                    cache += line[s:i]
                    s = i
                if line[i-1] in ('?', '!', '。', '?', '!'):
                    # Sentence boundary: close the current passage here.
                    cache += line[s:i]
                    s = i
                    retn.append(cache.strip())
                    cache = ''
                i += 1
            if len(line) > s:
                # Carry the unterminated tail over to the next passage.
                cache += line[s:]

        # Flush whatever remains at end of file.
        cache = cache.strip()
        if cache:
            retn.append(cache)
    return retn
|
61 |
+
|
62 |
+
|
63 |
+
def load_dataset(path):
    """Read a single parquet file into a DataFrame (pyarrow engine)."""
    return pd.read_parquet(path, engine="pyarrow")
|
66 |
+
|
67 |
+
|
68 |
+
def load_all_dataset(path, convert=False):
    """Load the corpus/queries/qrels parquet triple stored under *path*.

    path: directory containing corpus.parquet, queries.parquet, qrels.parquet.
    convert: when True, reshape qrels into {qid: {cid: score}} for fast lookup;
             otherwise the raw qrels DataFrame is returned.
    Returns (corpus, queries, qrels).
    """
    # os.path.join instead of `path + r'\...'`: identical on Windows, and no
    # longer broken on POSIX systems.
    qrels_pd = load_dataset(os.path.join(path, 'qrels.parquet'))
    corpus = load_dataset(os.path.join(path, 'corpus.parquet'))
    queries = load_dataset(os.path.join(path, 'queries.parquet'))
    if convert:
        qrels = defaultdict(dict)
        for i, e in tqdm(qrels_pd.iterrows(), desc="load_all_dataset: Converting"):
            qrels[e['qid']][e['cid']] = e['score']
    else:
        qrels = qrels_pd
    return corpus, queries, qrels
|
79 |
+
|
80 |
+
|
81 |
+
def save_dataset(path, df):
    """Write *df* to *path* as a gzip-compressed parquet file, dropping the index."""
    return df.to_parquet(path, engine="pyarrow", compression="gzip", index=False)
|
88 |
+
|
89 |
+
|
90 |
+
def save_all_dataset(path, corpus, queries, qrels):
    """Persist the corpus/queries/qrels DataFrames under directory *path*.

    Fix: build file paths with os.path.join instead of hard-coded backslash
    separators, so this also works on non-Windows systems.
    """
    save_dataset(os.path.join(path, "corpus.parquet"), corpus)
    save_dataset(os.path.join(path, "queries.parquet"), queries)
    save_dataset(os.path.join(path, "qrels.parquet"), qrels)
|
94 |
+
|
95 |
+
|
96 |
+
def create_dataset(corpus, queries, qrels):
    """Build typed DataFrames from raw tuples.

    corpus: iterable of (cid, text); queries: iterable of (qid, text);
    qrels: iterable of (qid, cid, score).
    Returns (corpus_pd, queries_pd, qrels_pd) with string ids and int scores.
    """
    corpus_pd = pd.DataFrame(corpus, columns=['cid', 'text'])
    queries_pd = pd.DataFrame(queries, columns=['qid', 'text'])
    qrels_pd = pd.DataFrame(qrels, columns=['qid', 'cid', 'score'])

    # Normalize dtypes: ids compare as strings, scores as integers.
    corpus_pd = corpus_pd.astype({'cid': str})
    queries_pd = queries_pd.astype({'qid': str})
    qrels_pd = qrels_pd.astype({'qid': str, 'cid': str, 'score': int})

    return corpus_pd, queries_pd, qrels_pd
|
108 |
+
|
109 |
+
|
110 |
+
def sample_from_dataset(corpus, queries, qrels, k=5000):
    """Randomly keep *k* queries (by qid) and restrict qrels and corpus to them.

    Returns (corpus_pd, queries_pd, qrels_pd) filtered to the sampled qids.
    """
    kept_qids = sorted(random.sample(queries['qid'].to_list(), k=k))
    sampled_queries = queries[queries['qid'].isin(kept_qids)]
    sampled_qrels = qrels[qrels['qid'].isin(kept_qids)]
    # Only corpus documents referenced by a surviving qrel are kept.
    sampled_corpus = corpus[corpus['cid'].isin(sampled_qrels['cid'])]

    return sampled_corpus, sampled_queries, sampled_qrels
|
117 |
+
|
118 |
+
# Build the H2Retrieval dataset from the raw h-ss-corpus text dump.
path = r'D:\datasets\h-corpus\h-ss-corpus'
rawcorpus = get_all_files_in_directory(path, '.txt.gz')
corpus = []   # (source file, relative position in file, passage text)
queries = []  # (source file, relative position in file, passage text)
qrels = []    # (qid, cid, graded relevance score)

# NOTE(review): the [103045:] start offset looks like a manual resume point
# from an interrupted run — confirm before re-running from scratch.
for sub_path in tqdm(rawcorpus[103045:], desc="Reading all data..."):
    tmp = readOne(os.path.join(path, sub_path))
    if len(tmp) < 5:
        continue
    阈值 = max(len(tmp) // 4, 4)  # threshold: roughly 4*5 = 20 passages sampled per file
    # print(阈值)
    old_rand = None
    for i in range(len(tmp)):
        rand = random.randint(0, 阈值)
        if rand == 0 and (old_rand is None or old_rand != 0):
            # Sampled as a query (never two queries in a row).
            queries.append((sub_path, i/(len(tmp)-1), tmp[i]))
        elif rand <= 4 or old_rand == 0:
            # Sampled as a corpus passage; the passage right after a query is
            # always kept so every query has a nearby positive.
            corpus.append((sub_path, i/(len(tmp)-1), tmp[i]))
            rand = 1
        else:
            pass
        old_rand = rand

# Keep a random 5000-query subset, preserving the original order.
tmp = random.sample(range(len(queries)), k=5000)
tmp.sort()
queries = [queries[i] for i in tmp]

# For each query, grade every corpus passage from the same source file:
# relevance decays linearly with distance between their positions in the file.
sidx = 0
for qid, q in tqdm(enumerate(queries), desc="计算 qrels 中..."):
    mt = False
    for cid in range(sidx, len(corpus)):
        c = corpus[cid]
        if q[0] == c[0]:
            mt = True
            ss = 1 - abs(q[1] - c[1])
            qrels.append((qid, cid, 100 * ss))
        else:
            if mt:
                # Past this query's file; later queries scan from here on.
                # NOTE(review): sidx = cid + 1 skips corpus[cid] itself, which
                # belongs to the *next* file — looks off by one if the next
                # query comes from that file; confirm intent.
                sidx = cid + 1
                break

# Re-key by dense integer ids (list position becomes cid/qid).
corpus_ = [(cid, c[2]) for cid, c in enumerate(corpus)]
queries_ = [(qid, q[2]) for qid, q in enumerate(queries)]

path = r'D:\datasets\H2Retrieval\new'
corpus_pd, queries_pd, qrels_pd = create_dataset(corpus_, queries_, qrels)
# Drop corpus rows that no qrel references.
tmp = corpus_pd[corpus_pd['cid'].isin(qrels_pd['cid'])]
corpus_pd = tmp
save_all_dataset(path + r'\data', corpus_pd, queries_pd, qrels_pd)
save_all_dataset(path + r'\data_sample1k', *sample_from_dataset(corpus_pd, queries_pd, qrels_pd, k=1000))


# save_all_dataset(path + r'\data_sample1k', *sample_from_dataset(*load_all_dataset(r'D:\datasets\H2Retrieval\new\data_sample5k'), k=1000))

# Sanity check: the freshly written dataset loads back.
tmp = load_all_dataset(r'D:\datasets\H2Retrieval\new\data')
|
new/test_pytrec_eval.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pandas as pd
|
3 |
+
from tqdm import tqdm
|
4 |
+
|
5 |
+
|
6 |
+
def dcg(scores):
    """Discounted cumulative gain of *scores* taken in their given order."""
    ranks = np.arange(2, len(scores) + 2)
    return np.sum(scores / np.log2(ranks))
|
9 |
+
|
10 |
+
|
11 |
+
def idcg(rels, topk):
    """Ideal DCG: relevances sorted best-first, truncated to *topk*."""
    best_order = np.sort(rels)[::-1]
    return dcg(best_order[:topk])
|
13 |
+
|
14 |
+
|
15 |
+
def odcg(rels, predictions):
    """Observed DCG: relevances ordered by descending predicted score."""
    ranked = np.argsort(predictions)[::-1]
    return dcg(rels[ranked])
|
18 |
+
|
19 |
+
|
20 |
+
def _ndcg(drels, dpredictions):
    """NDCG for a single query.

    drels: relevance judgements indexed by cid, with a 'score' column.
    dpredictions: retrieved results indexed by cid, with a 'score' column.
    Returns observed DCG divided by ideal DCG as a float.
    """
    topk = len(dpredictions)
    # Ideal DCG over all judged relevances, truncated to the result depth.
    _idcg = idcg(np.array(drels['score']), topk)
    # Judged documents that were actually retrieved.
    tmp = drels[drels.index.isin(dpredictions.index)]
    # Relevance vector aligned with the predictions: start from a zeroed copy
    # of the prediction series, then overwrite the entries whose cid has a
    # judgement (index-aligned update).
    rels = dpredictions['score'].copy()
    rels *= 0
    rels.update(tmp['score'])
    # NOTE(review): assumes cids are unique within each frame; duplicate index
    # labels would make isin/update ambiguous — confirm upstream.
    _odcg = odcg(rels.values, dpredictions['score'].values)
    return float(_odcg / _idcg)
|
29 |
+
|
30 |
+
|
31 |
+
def ndcg(qrels, results):
    """NDCG for one query.

    qrels/results: DataFrames with 'cid' and 'score' columns for a single qid.
    """
    rels_by_cid = qrels.set_index('cid', inplace=False)
    preds_by_cid = results.set_index('cid', inplace=False)
    return _ndcg(rels_by_cid, preds_by_cid)
|
36 |
+
|
37 |
+
|
38 |
+
def ndcg_in_all(qrels, results):
    """Per-query NDCG for every qid present in both *results* and *qrels*.

    Returns {qid: ndcg} covering the retrieved qids that have judgements.
    """
    grouped_rels = dict(qrels.groupby('qid'))
    grouped_res = dict(results.groupby('qid'))
    scores = {}
    for qid in tqdm(grouped_res, desc="计算 ndcg 中..."):
        if qid in grouped_rels:
            scores[qid] = ndcg(grouped_rels[qid], grouped_res[qid])
    return scores
|
46 |
+
|
47 |
+
|
48 |
+
if __name__ == '__main__':
    # Tiny hand-checkable smoke test: two queries with known judgements and a
    # small retrieved run, printed as {qid: ndcg}.
    qrels = pd.DataFrame(
        [
            ['q1', 'd1', 1],
            ['q1', 'd2', 2],
            ['q1', 'd3', 3],
            ['q1', 'd4', 4],
            ['q2', 'd1', 2],
            ['q2', 'd2', 1]
        ],
        columns=['qid', 'cid', 'score']
    )

    # Note: q2 retrieves d3/d5 which have no judgements, and q1 misses d1.
    results = pd.DataFrame(
        [
            ['q1', 'd2', 1],
            ['q1', 'd3', 2],
            ['q1', 'd4', 3],
            ['q2', 'd2', 1],
            ['q2', 'd3', 2],
            ['q2', 'd5', 2]
        ],
        columns=['qid', 'cid', 'score']
    )
    print(ndcg_in_all(qrels, results))
|