bpiwowar committed
Commit 933c653
1 Parent(s): 6f20920

Upload find_chunks.py

Files changed (1)
find_chunks.py +199 -0
find_chunks.py ADDED
import json
import logging
from pathlib import Path
import re
from typing import Dict, List, Tuple

import click
import ir_datasets
from hashlib import md5
from ir_datasets.datasets.trec_cast import (
    NAME,
    ColonCommaDupes,
    WapoDupes,
    DownloadConfig,
)

logging.basicConfig(level=logging.INFO)

base_path = ir_datasets.util.home_path() / NAME
dlc = DownloadConfig.context(NAME, base_path)

RE_TOKEN_MATCHER = re.compile(r"\S")

class Feeder:
    def __init__(self, base: str, mainpath: Path):
        self.feeders = [self.feeder(path) for path in mainpath.glob(f"{base}_*.jsonl")]
        self.current = [next(feeder) for feeder in self.feeders]

    def feeder(self, path: Path):
        with path.open("rt") as fp:
            for line in fp:
                data = json.loads(line)
                yield f"{data['id']}", [content["body"] for content in data["contents"]]
        # Shard exhausted: yield a sentinel that never matches a real doc id
        while True:
            yield "", ""

    def get(self, doc_id: str):
        # Return the passages for doc_id and advance the shard that held them
        for ix, (key, body) in enumerate(self.current):
            if key == doc_id:
                self.current[ix] = next(self.feeders[ix])
                return body
        assert False, f"Could not find {doc_id}"

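# Note: Feeder assumes the official chunk files are shards named
# "<base>_*.jsonl", visited in lockstep with the collection iterator; get()
# only advances the shard whose head matched the requested id. A sketch of
# the line format it consumes (field names as read above, values illustrative):
#
#   {"id": "DOC_1", "contents": [{"body": "first passage"}, {"body": "second passage"}]}
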
class Range:
    def __init__(self, text: str, pos: int):
        self.text = text
        self.start = pos
        self.end = pos + 1

    def process_next(self, target: str):
        # Extend the span if the next non-space character equals `target`;
        # return False on a mismatch and None when the text is exhausted
        ix = self.end
        while ix < len(self.text):
            c = self.text[ix]
            ix += 1

            if not c.isspace():
                if c == target:
                    self.end = ix
                    return True
                return False

        return None

    @property
    def limits(self):
        return self.start, self.end

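# Illustrative only: a candidate span grows one non-space character at a time,
# skipping whitespace in the body.
#
#   r = Range("a b", 0)
#   r.process_next("b")   # -> True (space skipped), r.limits == (0, 3)
#   Range("a b", 0).process_next("x")   # -> False: candidate dropped
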
def find_ranges(doc_id: str, body: str, passages: List[str]):
    # Due to a bug in chunking in CaST, we need a clever strategy
    # to recover ranges... so we find words from the passages

    passage_ranges: List[List[Tuple[int, int]]] = []

    for passage in passages:
        passage = passage.strip()

        c_ranges: List[Range] = []
        ranges: List[Tuple[int, int]] = []

        for m in RE_TOKEN_MATCHER.finditer(passage):
            c = m.group(0)
            old_c_ranges = c_ranges

            # Try to expand ranges...
            if c_ranges:
                c_ranges = [rng for rng in c_ranges if rng.process_next(c)]

            # otherwise, start new ones
            if not c_ranges:
                if old_c_ranges:
                    # Take the first one... might not be the best
                    ranges.append(old_c_ranges[0].limits)

                c_ranges = [Range(body, m.start(0)) for m in re.finditer(re.escape(c), body)]

                if not c_ranges:
                    logging.error(
                        "Cannot find character %s in %s",
                        c,
                        doc_id,
                    )
                    logging.error(" [passage] %s", passage)
                    logging.error(" [body] %s", body)
                    raise RuntimeError(f"Cannot find character {c!r} in {doc_id}")

        # Close the last surviving candidate for this passage
        if c_ranges:
            ranges.append(c_ranges[0].limits)

        logging.debug(" ---> %s", passage)
        logging.debug("ranges: %s", ranges)
        logging.debug("extracts: %s", [body[start:end] for start, end in ranges])

        # Sanity check: the extracts must equal the passage, whitespace aside
        p_1 = re.sub(r"\s+", "", "".join([body[start:end] for start, end in ranges]))
        p_2 = re.sub(r"\s+", "", passage)

        assert p_1 == p_2

        passage_ranges.append(ranges)

    return passage_ranges

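# Illustrative only: with body = "a b c a b" and passages = ["a b", "c a b"],
# the token "a" seeds candidates at offsets 0 and 6, mismatching candidates
# are dropped as characters are consumed, and the first survivor is kept,
# yielding whitespace-insensitive (start, end) spans per passage:
#
#   find_ranges("toy", "a b c a b", ["a b", "c a b"])
#   # -> [[(0, 3)], [(4, 9)]]
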
ITERATORS = {
    # Generated using the official scripts, adapted to our cases (and dupes)
    "MARCO_v1": lambda: ir_datasets.load("trec-cast/v2/msmarco").docs_iter(),
    "WaPo-v2": lambda: ir_datasets.load("trec-cast/v2/wapo").docs_iter(),
    "KILT-nodupes": lambda: ir_datasets.load("trec-cast/v2/kilt").docs_iter(),
    # Using the official split
    "MARCO_v2": lambda: ir_datasets.load("trec-cast/v3/msmarco").docs_iter(),
    "WaPo": lambda: ir_datasets.load("trec-cast/v3/wapo").docs_iter(),
    "KILT": lambda: ir_datasets.load("trec-cast/v3/kilt").docs_iter(),
}

@click.command()
@click.argument("jsonlines", type=Path)
@click.argument("name", type=click.Choice(list(ITERATORS.keys())))
def cli(name: str, jsonlines: Path):
    """Computes the ranges based on the official CaST splits

    jsonlines: path to the folder containing the official jsonl files
    name: name of the collection to be processed
    """
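    # Example invocation (hypothetical paths; JSONLINES is the first
    # positional argument, NAME the second):
    #   python find_chunks.py ./official_chunks MARCO_v1 > marco_v1.ranges.jsonl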
    core_iter = ITERATORS[name]()

    duplicates = set()
    post_duplicates = set()
    if name == "MARCO_v1":
        with ColonCommaDupes(dlc["v2/dupes/marco_v1"]).stream() as fin:
            post_duplicates = set(dupe_id.decode().strip() for dupe_id in fin)
    elif name == "KILT-nodupes":
        # no duplicates in v2
        pass
    elif name == "WaPo-v2":
        duplicates = WapoDupes(dlc["v2/dupes/wapo"]).doc_ids
    else:
        with dlc["v3/dupes"].stream() as fin:
            duplicates = set(dupe_id.decode().strip() for dupe_id in fin)

    feeder = Feeder(name, jsonlines)

    logging.info("Starting...")
    for doc in core_iter:
        if doc.doc_id in duplicates:
            continue

        body = doc.passages[0]
        all_ranges = find_ranges(doc.doc_id, body, feeder.get(doc.doc_id))

        # Fingerprint the extracts: \x00 before each passage,
        # \x01 before each (start, end) extract
        computer = md5()
        for ranges in all_ranges:
            computer.update(b"\x00")
            for start, end in ranges:
                computer.update(b"\x01")
                computer.update(body[start:end].encode("utf-8"))

        if doc.doc_id in post_duplicates:
            # Ignore our work.... arggg (the document still had to be fed
            # through find_ranges to keep the feeder shards aligned)
            continue

        print(
            json.dumps(
                {
                    "id": doc.doc_id,
                    "ranges": all_ranges,
                    "md5": computer.digest().hex(),
                },
                indent=None,
            )
        )

    logging.info("Finished...")


if __name__ == "__main__":
    cli()
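
Downstream, the md5 field lets consumers check that their copy of a document reproduces exactly the same extracts. A minimal verification sketch (verify_entry is a hypothetical helper; entry is one parsed output line, body the corresponding document text):

import json
from hashlib import md5

def verify_entry(entry: dict, body: str) -> bool:
    # Recompute the fingerprint exactly as find_chunks.py emits it:
    # \x00 before each passage, \x01 before each (start, end) extract
    computer = md5()
    for ranges in entry["ranges"]:
        computer.update(b"\x00")
        for start, end in ranges:
            computer.update(b"\x01")
            computer.update(body[start:end].encode("utf-8"))
    return computer.digest().hex() == entry["md5"]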