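"""Compute passage ranges for the TREC CaST document collections.

For each document of an ir_datasets CaST collection, this script locates the
official passage split (jsonl files) inside the document body, recovers the
character ranges of every passage, and prints one JSON record per document
with those ranges and an MD5 checksum of the extracted text.
"""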
from hashlib import md5
import json
import logging
from pathlib import Path
import re
from typing import List, Tuple

import click
import ir_datasets
from ir_datasets.datasets.trec_cast import (
    NAME,
    ColonCommaDupes,
    WapoDupes,
    DownloadConfig,
)
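# Note: this script relies on internal helpers of ir_datasets' trec_cast
# module (the duplicate lists and the download configuration)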
logging.basicConfig(level=logging.INFO)

base_path = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, base_path)
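# Matches a single non-whitespace character: passages are matched against the
# document body character by character, not token by token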
RE_TOKEN_MATCHER = re.compile(r"\S")
class Feeder:
    """Feeds passages from the official CaST jsonl shards, one generator per shard."""

    def __init__(self, base: str, mainpath: Path):
        self.feeders = [self.feeder(path) for path in mainpath.glob(f"{base}_*.jsonl")]
        self.current = [next(feeder) for feeder in self.feeders]

    def feeder(self, path: Path):
        with path.open("rt") as fp:
            for line in fp:
                data = json.loads(line)
                yield f"{data['id']}", [content["body"] for content in data["contents"]]
        # Shard exhausted: yield an empty sentinel forever so next() never raises
        while True:
            yield "", ""

    def get(self, doc_id: str):
        for ix, (key, body) in enumerate(self.current):
            if key == doc_id:
                self.current[ix] = next(self.feeders[ix])
                return body
        assert False, f"Could not find {doc_id}"
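# A minimal usage sketch (hypothetical shard layout): with files
# MARCO_v1_0.jsonl, MARCO_v1_1.jsonl, ... under jsonlines/,
#
#   feeder = Feeder("MARCO_v1", Path("jsonlines"))
#   passages = feeder.get(some_doc_id)  # -> list of passage bodies
#
# get() only inspects the head entry of each shard, so documents must be
# requested in the order in which they appear within their shard.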
class Range:
    """A candidate character range [start, end) within `text`."""

    def __init__(self, text: str, pos: int):
        self.text = text
        self.start = pos
        self.end = pos + 1

    def process_next(self, target: str):
        """Try to extend the range up to the next occurrence of `target`.

        Returns True if the range was extended, False if the next
        non-whitespace character differs, and None at the end of the text.
        """
        ix = self.end
        while ix < len(self.text):
            c = self.text[ix]
            ix += 1
            if not c.isspace():
                if c == target:
                    self.end = ix
                    return True
                return False
        return None

    def limits(self):
        return self.start, self.end
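# For example (hypothetical text): a Range anchored at the "q" of
# "the quick fox" extends over "quick" when fed "u", "i", "c", "k" in turn;
# process_next() skips whitespace in the body between matched characters.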
def find_ranges(doc_id: str, body: str, passages: List[str]):
    # Due to a bug in the CaST chunking, passages do not always align exactly
    # with the document body, so we recover each passage's character ranges by
    # matching its non-whitespace characters against the body.
    passage_ranges: List[List[Tuple[int, int]]] = []
    for passage in passages:
        passage = passage.strip()
        c_ranges: List[Range] = []
        ranges: List[Tuple[int, int]] = []
        for m in RE_TOKEN_MATCHER.finditer(passage):
            c = m.group(0)
            old_c_ranges = c_ranges

            # Try to expand the current candidate ranges...
            if c_ranges:
                c_ranges = [rng for rng in c_ranges if rng.process_next(c)]

            # ... otherwise, close the previous range and start new ones
            if not c_ranges:
                if old_c_ranges:
                    # Takes the first one... might not be the best
                    ranges.append(old_c_ranges[0].limits())
                c_ranges = [
                    Range(body, m.start(0)) for m in re.finditer(re.escape(c), body)
                ]
                if not c_ranges:
                    logging.error("Cannot find character %s in %s", c, doc_id)
                    logging.error(" [passage] %s", passage)
                    logging.error(" [body] %s", body)
                    raise RuntimeError(f"Cannot find character {c!r} in {doc_id}")

        # Close the last pending range
        if c_ranges:
            ranges.append(c_ranges[0].limits())

        logging.debug(" ---> %s", passage)
        logging.debug("ranges: %s", ranges)
        logging.debug("extracts: %s", [body[start:end] for start, end in ranges])

        # Sanity check: the extracted ranges must reconstruct the passage
        # (up to whitespace)
        p_1 = re.sub(r"\s+", "", "".join(body[start:end] for start, end in ranges))
        p_2 = re.sub(r"\s+", "", passage)
        assert p_1 == p_2, f"Range mismatch for {doc_id}"

        passage_ranges.append(ranges)
    return passage_ranges
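# A minimal sketch of the expected output (hypothetical inputs):
#
#   body = "Hello  world. Bye."
#   find_ranges("doc0", body, ["Hello world.", "Bye."])
#   # -> [[(0, 13)], [(14, 18)]]
#
# Each passage maps to a list of (start, end) character ranges into the body;
# a passage gets several ranges when its characters cannot be matched as a
# single contiguous span.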
ITERATORS = {
    # Generated using the official scripts, adapted to our cases (and dupes)
    "MARCO_v1": lambda: ir_datasets.load("trec-cast/v2/msmarco").docs_iter(),
    "WaPo-v2": lambda: ir_datasets.load("trec-cast/v2/wapo").docs_iter(),
    "KILT-nodupes": lambda: ir_datasets.load("trec-cast/v2/kilt").docs_iter(),
    # Using the official split
    "MARCO_v2": lambda: ir_datasets.load("trec-cast/v3/msmarco").docs_iter(),
    "WaPo": lambda: ir_datasets.load("trec-cast/v3/wapo").docs_iter(),
    "KILT": lambda: ir_datasets.load("trec-cast/v3/kilt").docs_iter(),
}
# Assumed CLI wiring: the click decorators appear to be missing from the
# extracted source, so this reconstruction is based on the docstring and on
# the bare cli() call at the bottom of the script
@click.command()
@click.argument("name", type=click.Choice(list(ITERATORS)))
@click.argument("jsonlines", type=click.Path(exists=True, file_okay=False, path_type=Path))
def cli(name: str, jsonlines: Path):
    """Computes the ranges based on the official CaST splits

    NAME: name of the collection to be processed

    JSONLINES: path to the folder containing the official jsonl files
    """
    core_iter = ITERATORS[name]()

    duplicates = set()
    post_duplicates = set()
    if name == "MARCO_v1":
        with ColonCommaDupes(dlc["v2/dupes/marco_v1"]).stream() as fin:
            post_duplicates = set(dupe_id.decode().strip() for dupe_id in fin)
    elif name == "KILT-nodupes":
        # no duplicates in v2
        pass
    elif name == "WaPo-v2":
        duplicates = WapoDupes(dlc["v2/dupes/wapo"]).doc_ids
    else:
        with dlc["v3/dupes"].stream() as fin:
            duplicates = set(dupe_id.decode().strip() for dupe_id in fin)

    feeder = Feeder(name, jsonlines)
logging.info("Starting...") | |
for doc in core_iter: | |
if doc.doc_id in duplicates: | |
continue | |
body = doc.passages[0] | |
all_ranges = find_ranges(doc.doc_id, body, feeder.get(doc.doc_id)) | |
computer = md5() | |
for ranges in all_ranges: | |
computer.update(b"\x00") | |
for start, end in ranges: | |
computer.update(b"\x01") | |
computer.update(body[start:end].encode("utf-8")) | |
if doc.doc_id in post_duplicates: | |
# Ignore our work.... arggg | |
continue | |
print( | |
json.dumps( | |
{ | |
"id": doc.doc_id, | |
"ranges": all_ranges, | |
"md5": computer.digest().hex(), | |
}, | |
indent=None, | |
) | |
) | |
logging.info("Finished...") | |
if __name__ == "__main__": | |
cli() | |
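# Example invocation (hypothetical file name and paths; valid collection
# names are the keys of ITERATORS):
#
#   python compute_ranges.py MARCO_v1 /path/to/official/jsonl > marco_v1.ranges.jsonl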