import pandas as pd
import os
import gzip
import random
import re
from tqdm import tqdm
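
# Recursively collect every file path under `directory`, relative to `directory`.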
def get_all_files_in_directory(directory):
    all_files = []
    for root, dirs, files in os.walk(directory):
        root = root[len(directory):]
        if root.startswith('\\') or root.startswith('/'):
            root = root[1:]
        for file in files:
            file_path = os.path.join(root, file)
            all_files.append(file_path)
    return all_files
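
# A list of file paths (relative to `root`) filtered by extension. Integer indexing
# returns the parsed file contents (via `_read`) or the full path; slicing returns
# another Fileset that shares the same root and reader.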
class Fileset(list):
    def __init__(self, path, ext='', _read=None):
        if isinstance(path, str):
            self.root = path
            self.extend(f for f in get_all_files_in_directory(self.root) if f.endswith(ext))
            self._read = _read

    def __getitem__(self, index):
        if isinstance(index, int):  # index is an integer position
            if self._read:
                return self._read(os.path.join(self.root, super().__getitem__(index)))
            else:
                return os.path.join(self.root, super().__getitem__(index))
        else:  # index is a slice
            fileset = Fileset(None)
            fileset.root = self.root
            fileset._read = self._read
            fileset.extend(super().__getitem__(index))
            return fileset
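
# Read a text file (gzip-compressed or plain) and return its lines, stripped.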
def readOne(filePath):
    with (gzip.open(filePath, 'rt', encoding='utf-8') if filePath.endswith('.gz')
          else open(filePath, encoding='utf-8')) as f:
        retn = [line.strip() for line in f]
    return retn
rawcorpus = Fileset(r'D:\datasets\h-corpus\h-ss-corpus', '.txt.gz', _read=readOne)
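
# Accumulators for the three output tables:
# corpus = (cid, text), queries = (qid, text), qrels = (qid, cid, score).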
corpus = []
queries = []
qrels = []
reg_4 = re.compile(r'(.)\1{3,}')  # matches four or more consecutive identical characters
def has_four_or_more_repeated_chars(text):
    return bool(reg_4.search(text))
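
# Pick a random line index whose 5-line window (two lines on each side) contains only
# lines of 20-512 characters with no run of four or more identical characters;
# returns -1 if no suitable index is found within 20 attempts.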
def randsqidx(tmp):
    for _ in range(20):  # try up to 20 times
        sqidx = random.randint(10, len(tmp) - 10)
        if any(len(tmp[j]) < 20 or len(tmp[j]) > 512 or has_four_or_more_repeated_chars(tmp[j]) for j in range(sqidx - 2, sqidx + 3)):
            continue
        return sqidx
    return -1
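
# Record tmp[sqidx] as a new query and its neighbours tmp[sqidx + k] (k in _range)
# as corpus entries; sr maps each offset k to its relevance score via sr[k + 2].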
def appendqrels(tmp, sqidx, _range, sr):
    qidx = len(queries)
    queries.append((qidx, tmp[sqidx]))
    if corpus:
        cidx = corpus[-1][0] + 3
    else:
        cidx = 2
    for k in _range:
        corpus.append((cidx + k, tmp[sqidx + k]))
        qrels.append((qidx, cidx + k, sr[k + 2]))
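
# Split a long string into sentence-aligned chunks of at least 64 characters each;
# a trailing fragment without closing punctuation is discarded.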
def split3(s):
    retn = []
    cache = ''
    for one in s:
        cache += one
        if len(cache) < 64:
            continue
        if one in ('?', '!', '。', '?', '!'):
            retn.append(cache)
            cache = ''
    # print(retn)
    return retn
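
# For each document (skipping those with fewer than 30 lines): with probability 3/4,
# sample a query line plus two context lines on each side; otherwise split the first
# line longer than 512 characters into sentences and use one sentence plus its
# immediate neighbours.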
def main():
    for i in tqdm(range(len(rawcorpus)), desc="Converting"):
        tmp = rawcorpus[i]
        if len(tmp) < 30:
            continue
        if random.randint(0, 3):
            sqidx = randsqidx(tmp)
            if sqidx > 2:
                appendqrels(tmp, sqidx, (-2, -1, 1, 2), (0.95, 0.97, 1, 0.97, 0.95))
            continue
        for s in tmp:
            if len(s) <= 512:
                continue
            s = split3(s)
            if len(s) < 3:
                continue
            sqidx = random.randint(1, len(s) - 2)
            appendqrels(s, sqidx, (-1, 1), (0.95, 1, 1, 1, 0.95))
            break
main()
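
# Assemble the tables and write them out as gzip-compressed Parquet files.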
corpus_pd = pd.DataFrame(corpus, columns=['cid', 'text'], dtype=str)
queries_pd = pd.DataFrame(queries, columns=['qid', 'text'], dtype=str)
qrels_pd = pd.DataFrame(qrels, columns=['qid', 'cid', 'score'], dtype=str)
# def load_dataset(path):
#     df = pd.read_parquet(path, engine="pyarrow")
#     return df
# corpus_pd = load_dataset(r"D:\datasets\H2Retrieval\data\corpus.parquet.gz")
# queries_pd = load_dataset(r"D:\datasets\H2Retrieval\data\queries.parquet.gz")
# qrels_pd = load_dataset(r"D:\datasets\H2Retrieval\data\qrels.parquet.gz")
corpus_pd['cid'] = corpus_pd['cid'].astype(str)
queries_pd['qid'] = queries_pd['qid'].astype(str)
qrels_pd['qid'] = qrels_pd['qid'].astype(str)
qrels_pd['cid'] = qrels_pd['cid'].astype(str)
qrels_pd['score'] = (qrels_pd['score'].astype(float) * 100).astype(int)
corpus_pd.to_parquet(
    r"D:\datasets\H2Retrieval\data\corpus.parquet.gz",
    engine="pyarrow",
    compression="gzip",
)
queries_pd.to_parquet(
    r"D:\datasets\H2Retrieval\data\queries.parquet.gz",
    engine="pyarrow",
    compression="gzip",
)
qrels_pd.to_parquet(
    r"D:\datasets\H2Retrieval\data\qrels.parquet.gz",
    engine="pyarrow",
    compression="gzip",
)