Upload 4 files
Browse files- H2Retrieval.py +124 -0
- data/corpus.parquet.gz +3 -0
- data/qrels.parquet.gz +3 -0
- data/queries.parquet.gz +3 -0
H2Retrieval.py
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import os
|
3 |
+
import gzip
|
4 |
+
import random
|
5 |
+
import re
|
6 |
+
from tqdm import tqdm
|
7 |
+
def get_all_files_in_directory(directory):
    """Recursively collect every file under *directory*.

    Returns a list of paths expressed relative to *directory* (the root
    prefix and its trailing separator are stripped off).
    """
    collected = []
    for base, _dirs, names in os.walk(directory):
        # Strip the root prefix, then a single leading path separator.
        rel = base[len(directory):]
        if rel[:1] in ('\\', '/'):
            rel = rel[1:]
        collected.extend(os.path.join(rel, name) for name in names)
    return collected
|
17 |
+
|
18 |
+
class Fileset(list):
    """A list of file paths relative to a root directory.

    Integer indexing resolves the entry to an absolute path under
    ``self.root`` and, when a reader callable was supplied, returns the
    parsed file contents instead of the path.  Slicing yields a new
    Fileset that shares the same root and reader.
    """

    def __init__(self, path, ext='', _read=None):
        if isinstance(path, str):
            self.root = path
            # Keep only files whose name ends with the requested extension.
            self.extend(
                f for f in get_all_files_in_directory(self.root)
                if f.endswith(ext)
            )
            self._read = _read

    def __getitem__(self, index):
        if not isinstance(index, int):
            # Slice: wrap the selected entries in a fresh Fileset that
            # inherits this one's root and reader.
            subset = Fileset(None)
            subset.root = self.root
            subset._read = self._read
            subset.extend(super().__getitem__(index))
            return subset
        # Integer index: resolve to an absolute path, optionally parsed.
        full_path = os.path.join(self.root, super().__getitem__(index))
        if self._read:
            return self._read(full_path)
        return full_path
|
37 |
+
|
38 |
+
def readOne(filePath):
    """Read a UTF-8 text file (gzip-compressed if it ends in ``.gz``)
    and return its lines with surrounding whitespace stripped."""
    if filePath.endswith('.gz'):
        opener = gzip.open(filePath, 'rt', encoding='utf-8')
    else:
        opener = open(filePath, encoding='utf-8')
    with opener as handle:
        return [line.strip() for line in handle]
|
42 |
+
|
43 |
+
# Source novels: one gzip-compressed UTF-8 .txt per work; readOne parses
# each file into a list of stripped lines on access.
rawcorpus = Fileset(r'D:\datasets\h-corpus\h-ss-corpus','.txt.gz', _read=readOne)
# Accumulators for the retrieval dataset, filled in by appendqrels():
# corpus holds (cid, text), queries holds (qid, text),
# qrels holds (qid, cid, relevance score) triples.
corpus = []
queries = []
qrels = []
|
47 |
+
|
48 |
+
# Matches a run of four or more identical consecutive characters.
reg_4 = re.compile(r'(.)\1{3,}')

def has_four_or_more_repeated_chars(text):
    """Return True when *text* contains the same character repeated
    four or more times in a row (a cheap junk-line heuristic)."""
    return reg_4.search(text) is not None
|
51 |
+
|
52 |
+
def randsqidx(tmp):
    """Pick a random query-line index into *tmp* (a list of lines).

    A candidate index is accepted only if every line in its 5-line
    window (index-2 .. index+2) is "clean": between 20 and 512 chars
    long and free of 4+ repeated characters.  Gives up and returns -1
    after 20 failed attempts.  Assumes len(tmp) >= 20 so the randint
    bounds are valid.
    """
    for _attempt in range(20):
        candidate = random.randint(10, len(tmp) - 10)
        window = tmp[candidate - 2:candidate + 3]
        if all(
            20 <= len(line) <= 512 and not has_four_or_more_repeated_chars(line)
            for line in window
        ):
            return candidate
    return -1
|
59 |
+
|
60 |
+
def appendqrels(tmp, sqidx, _range, sr):
    """Register tmp[sqidx] as a new query and its neighbours as scored passages.

    Appends (qid, text) to the module-level ``queries`` list, then for each
    offset k in *_range* appends (cid+k, tmp[sqidx+k]) to ``corpus`` and
    (qid, cid+k, sr[k+2]) to ``qrels`` — i.e. *sr* is indexed so that
    sr[2] corresponds to offset 0.  Corpus ids advance by 3 past the last
    assigned id, leaving a gap so neighbouring windows never collide.
    """
    qidx = len(queries)
    queries.append((qidx, tmp[sqidx]))
    cidx = corpus[-1][0] + 3 if corpus else 2
    for offset in _range:
        corpus.append((cidx + offset, tmp[sqidx + offset]))
        qrels.append((qidx, cidx + offset, sr[offset + 2]))
|
70 |
+
|
71 |
+
def split3(s):
    """Split string *s* into sentence chunks of at least 64 characters.

    A chunk is cut only at a sentence-ending punctuation mark (half- or
    full-width ? / ! or the CJK full stop) once the running buffer has
    reached 64 characters.  Any trailing text without a terminator is
    discarded.
    """
    chunks = []
    buffer = ''
    for ch in s:
        buffer += ch
        if len(buffer) >= 64 and ch in ('?', '!', '。', '?', '!'):
            chunks.append(buffer)
            buffer = ''
    # print(chunks)
    return chunks
|
83 |
+
|
84 |
+
def main():
    # Build the queries/corpus/qrels accumulators by sampling one query
    # (plus scored neighbour passages) from each sufficiently long novel.
    for i in tqdm(range(len(rawcorpus)), desc="Converting"):
        tmp = rawcorpus[i]  # list of stripped lines for one novel
        if len(tmp) < 30:
            # Too short for randsqidx's randint(10, len-10) bounds.
            continue
        if random.randint(0, 3):
            # ~75% of novels: sample a clean line and its +/-2 neighbours,
            # scored 0.95/0.97/(1)/0.97/0.95 by distance from the query.
            sqidx = randsqidx(tmp)
            if sqidx > 2:
                appendqrels(tmp, sqidx, (-2, -1, 1, 2), (0.95, 0.97, 1, 0.97, 0.95))
            continue
        # Remaining ~25%: find the first very long line (>512 chars),
        # split it into sentence chunks and sample a chunk plus its
        # +/-1 neighbours instead.
        for s in tmp:
            if len(s) <= 512:
                continue
            s = split3(s)
            if len(s) < 3:
                # Not enough chunks for a query with two neighbours.
                continue
            sqidx = random.randint(1, len(s)-2)
            appendqrels(s, sqidx, (-1, 1), (0.95, 1, 1, 1, 0.95))
            break
|
103 |
+
|
104 |
+
# Build the dataset (module-level side effect: runs on import/execution).
main()

# Materialise the accumulated tuples as DataFrames for serialisation.
corpus_pd = pd.DataFrame(corpus, columns=['cid', 'text'])
queries_pd = pd.DataFrame(queries, columns=['qid', 'text'])
qrels_pd = pd.DataFrame(qrels, columns=['qid', 'cid', 'score'])

# Write the three gzip-compressed parquet files that make up the
# H2Retrieval dataset (corpus passages, queries, relevance judgments).
corpus_pd.to_parquet(
    r"D:\datasets\H2Retrieval\data\corpus.parquet.gz",
    engine="pyarrow",
    compression="gzip",
)
queries_pd.to_parquet(
    r"D:\datasets\H2Retrieval\data\queries.parquet.gz",
    engine="pyarrow",
    compression="gzip",
)
qrels_pd.to_parquet(
    r"D:\datasets\H2Retrieval\data\qrels.parquet.gz",
    engine="pyarrow",
    compression="gzip",
)
|
data/corpus.parquet.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2d91eb565e7fb2179f208351550888e70a048129c96ef15b5797a828d18d4ea7
|
3 |
+
size 62677530
|
data/qrels.parquet.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:23e25ecd898b92614c846bb744db41789cb9a163f0c413e77cf716ca00092eb9
|
3 |
+
size 2247573
|
data/queries.parquet.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:60bdc6f49f2c553f448f23ab67b2f2f69e1cf186cb68f82fdf7f9bd109495646
|
3 |
+
size 16975525
|