crystina-z committed on
Commit
5080226
1 Parent(s): 2e90bb1

Update xor-tydi.py

Browse files
Files changed (1) hide show
  1. xor-tydi.py +18 -14
xor-tydi.py CHANGED
@@ -122,34 +122,37 @@ class XORTyDi(datasets.GeneratorBasedBuilder):
122
 
123
 
124
  # prepare doc
125
- doc2docid = {}
126
- with open(filepath, encoding="utf-8") as f:
127
- all_data = json.load(f)
 
128
  for i, data in enumerate(all_data):
129
  positive_ctxs = data["positive_ctxs"]
130
  hard_negative_ctxs = data["hard_negative_ctxs"]
131
  ctxs = positive_ctxs + hard_negative_ctxs
132
 
133
- for doc for ctxs:
134
- text = process_doc_text[doc]
135
  if text not in doc2docid:
136
  doc2docid[text] = len(doc2docid)
 
137
 
138
- def process_train_entry(data, _id):
139
  positive_ctxs = data["positive_ctxs"]
140
  hard_negative_ctxs = data["hard_negative_ctxs"]
141
  # each ctx: {'title':... , 'text': ....}
142
 
143
  def process_ctx(ctxs, tag):
144
- text = process_doc_text(doc)
145
- return [
146
- {
 
147
  "title": doc["title"],
148
- "text":
149
  # 'docid': f'{tag}-{i}-{random.randint(*RANGE)}'
150
- 'docid': doc2docid[docid]
151
- } for i, doc in enumerate(ctxs)
152
- ]
153
 
154
  return _id, {
155
  "query_id": _id,
@@ -174,8 +177,9 @@ class XORTyDi(datasets.GeneratorBasedBuilder):
174
  try:
175
  with open(filepath, encoding="utf-8") as f:
176
  all_data = json.load(f)
 
177
  for i, data in enumerate(all_data):
178
- yield process_train_entry(data, i)
179
 
180
  # if filepath.endswith(".jsonl"): <-- doesn't work
181
  except Exception as e:
 
122
 
123
 
124
  # prepare doc
125
+ def get_doc2docid(all_data):
126
+ doc2docid = {}
127
+ # with open(filepath, encoding="utf-8") as f:
128
+ # all_data = json.load(f)
129
  for i, data in enumerate(all_data):
130
  positive_ctxs = data["positive_ctxs"]
131
  hard_negative_ctxs = data["hard_negative_ctxs"]
132
  ctxs = positive_ctxs + hard_negative_ctxs
133
 
134
+ for doc in ctxs:
135
+ text = process_doc_text(doc)
136
  if text not in doc2docid:
137
  doc2docid[text] = len(doc2docid)
138
+ return doc2docid
139
 
140
+ def process_train_entry(data, _id, doc2docid):
141
  positive_ctxs = data["positive_ctxs"]
142
  hard_negative_ctxs = data["hard_negative_ctxs"]
143
  # each ctx: {'title':... , 'text': ....}
144
 
145
  def process_ctx(ctxs, tag):
146
+ processed = []
147
+ for i, doc in enumerate(ctxs):
148
+ text = process_doc_text(doc)
149
+ processed.append({
150
  "title": doc["title"],
151
+ "text": text,
152
  # 'docid': f'{tag}-{i}-{random.randint(*RANGE)}'
153
+ 'docid': doc2docid[text]
154
+ })
155
+ return processed
156
 
157
  return _id, {
158
  "query_id": _id,
 
177
  try:
178
  with open(filepath, encoding="utf-8") as f:
179
  all_data = json.load(f)
180
+ doc2docid = get_doc2docid(all_data)
181
  for i, data in enumerate(all_data):
182
+ yield process_train_entry(data, i, doc2docid)
183
 
184
  # if filepath.endswith(".jsonl"): <-- doesn't work
185
  except Exception as e: