Languages: Korean
Size Categories: 1K<n<10K
Tags: art
Soyoung committed
Commit da570dc (parent: 8131ae5)

Create dataset.py

Files changed (1)
  1. dataset.py +812 -0
dataset.py ADDED
@@ -0,0 +1,812 @@
import logging
import os
import os.path as osp
import json
import numpy as np
# from konlpy.tag import Okt

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

from tqdm import tqdm

from utils import seed_worker
import pprint

def load_data(args,
              config=None, config_kor=None, config_han=None,
              tokenizer=None, tokenizer_kor=None, tokenizer_han=None,
              split="train"):

    if args.joint:
        dataset = JointDataset(args, config_kor, config_han, tokenizer_kor, tokenizer_han, split)

    else:
        assert args.language in ['korean', 'hanja']

        if args.language == 'korean':
            dataset = KoreanDataset(args, config, tokenizer, split)
        elif args.language == 'hanja':
            dataset = HanjaDataset(args, config, tokenizer, split)

    if split == "train":
        dataloader = DataLoader(dataset,
                                batch_size=args.train_batch_size,
                                collate_fn=dataset.collate_fn,
                                worker_init_fn=seed_worker,
                                num_workers=args.num_workers,
                                shuffle=True,
                                drop_last=True,
                                pin_memory=True)
    elif split == "valid":
        dataloader = DataLoader(dataset,
                                batch_size=args.eval_batch_size,
                                collate_fn=dataset.collate_fn,
                                shuffle=False,
                                drop_last=False,
                                pin_memory=True)
    elif split == "test":
        dataloader = DataLoader(dataset,
                                batch_size=args.test_batch_size,
                                collate_fn=dataset.collate_fn,
                                shuffle=False,
                                drop_last=False)
    else:
        raise ValueError("Data split must be either train/valid/test.")

    return dataloader

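# Usage sketch: how a loader might be built, assuming an argparse-style `args`
# exposing the attributes referenced in this file (joint, language, data_dir,
# model_type, max_seq_length, mark_entities, batch sizes, num_workers, ...) and
# a Hugging Face config/tokenizer pair; the concrete values below are only
# illustrative.
#
#     from argparse import Namespace
#     args = Namespace(joint=False, language="korean", data_dir="data/kor_han",
#                      model_type="bert", max_seq_length=512, mark_entities=True,
#                      train_batch_size=4, num_workers=2, do_analysis=False)
#     train_loader = load_data(args, config=config, tokenizer=tokenizer, split="train")
#     batch = next(iter(train_loader))
#     batch["input_ids"].shape   # -> (train_batch_size, args.max_seq_length)
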
class JointDataset(Dataset):

    def __init__(self, args, config_kor, config_han, tokenizer_kor, tokenizer_han, split="train"):
        self.args = args
        self.config_kor = config_kor
        self.config_han = config_han
        self.tokenizer_kor = tokenizer_kor
        self.tokenizer_han = tokenizer_han
        self.split = split
        self.features = []

        if args.add_emb:
            self.save_dir = osp.join(args.data_dir, f"joint_add_{args.w_kor_emb}")
        else:
            self.save_dir = osp.join(args.data_dir, "joint_concat")

        self.save_path = osp.join(self.save_dir, f"{args.model_type}+{args.model2_type}_{split}.pt")
        os.makedirs(self.save_dir, exist_ok=True)

        map_dir = '/'.join(args.data_dir.split('/')[:-1])

        with open(osp.join(map_dir, "ner_map.json")) as f:
            self.ner_map = json.load(f)
        with open(osp.join(map_dir, "label_map.json")) as f:
            self.label_map = json.load(f)

        self.load_and_cache_examples()

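    # Cache-path sketch (placeholder values): with add_emb=True, w_kor_emb=0.5,
    # model_type="bert" and model2_type="bert", train features would be cached at
    #     {data_dir}/joint_add_0.5/bert+bert_train.pt
    # while ner_map.json and label_map.json are read from the parent directory
    # of data_dir.
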
    def load_and_cache_examples(self):
        if osp.exists(self.save_path):
            logging.info(f"Loading features from {self.save_path}")
            self.features = torch.load(self.save_path)
            return

        cls_token_kor = self.tokenizer_kor.cls_token
        sep_token_kor = self.tokenizer_kor.sep_token
        cls_token_han = self.tokenizer_han.cls_token
        sep_token_han = self.tokenizer_han.sep_token
        num_special_tokens = 2
        num_empty_entity_examples = 0
        num_empty_label_examples = 0
        num_filtered_labels = 0

        logging.info(f"Creating features from {self.args.data_dir}")
        rootdir = osp.join(self.args.data_dir, f"{self.split}")

        N_data_problems = 0

        for json_file in tqdm(os.listdir(rootdir), desc="Converting examples to features"):
            with open(osp.join(rootdir, json_file), encoding='utf-8') as f:
                ex = json.load(f)

            if len(ex["entity"]) == 0:
                num_empty_entity_examples += 1
                continue

            if len(ex["relation"]) == 0:
                num_empty_label_examples += 1
                continue

            ### Tokenize text & cluster entity mentions ###
            entities_kor = []    # list of lists clustering same entity mentions
            entities_han = []
            coref_dict_kor = {}  # { coref_type: entity_idx } -> will be used to cluster mentions
            coref_dict_han = {}
            ent2idx_kor = {}     # { info: entity_idx } -> map entity to idx
            ent2idx_han = {}
            ent_idx_kor = 0      # unique entity idx
            ent_idx_han = 0
            prev_idx_kor = 1     # skip cls_token idx
            prev_idx_han = 1
            input_tokens_kor = [cls_token_kor]
            input_tokens_han = [cls_token_han]
            long_seq = False

            for ent in ex["entity"]:
                if (ent["kor"]["type"] == "START" or ent["kor"]["text"] == "" or ent["kor"]["text"] == " " or
                        ent["han"]["type"] == "START" or ent["han"]["text"] == "" or ent["han"]["text"] == " "):
                    continue

                if ent["han"]["coref_type"] != ent["kor"]["coref_type"]:
                    ent["han"]["coref_type"] = ent["kor"]["coref_type"]
                # when tokenizing, make note of subword idxes
                prev_text_kor = ex["text"]["kor"][prev_idx_kor:ent["kor"]["start"]]
                prev_text_han = ex["text"]["han"][prev_idx_han:ent["han"]["start"]]
                prev_tokens_kor = self.tokenizer_kor.tokenize(prev_text_kor)
                prev_tokens_han = self.tokenizer_han.tokenize(prev_text_han)
                input_tokens_kor += prev_tokens_kor
                input_tokens_han += prev_tokens_han
                start_kor = len(input_tokens_kor)
                start_han = len(input_tokens_han)
                ent_text_kor = ex["text"]["kor"][ent["kor"]["start"]:ent["kor"]["end"]]
                ent_text_han = ex["text"]["han"][ent["han"]["start"]:ent["han"]["end"]]
                ent_tokens_kor = self.tokenizer_kor.tokenize(ent_text_kor)
                ent_tokens_han = self.tokenizer_han.tokenize(ent_text_han)
                if self.args.mark_entities:
                    ent_tokens_kor = ["*"] + ent_tokens_kor + ["*"]
                    ent_tokens_han = ["*"] + ent_tokens_han + ["*"]
                input_tokens_kor += ent_tokens_kor
                input_tokens_han += ent_tokens_han
                end_kor = len(input_tokens_kor)
                end_han = len(input_tokens_han)
                prev_idx_kor = ent["kor"]["end"]
                prev_idx_han = ent["han"]["end"]

                if (start_kor > self.args.max_seq_length-num_special_tokens or
                        end_kor > self.args.max_seq_length-num_special_tokens or
                        start_han > self.args.max_seq_length-num_special_tokens or
                        end_han > self.args.max_seq_length-num_special_tokens):
                    long_seq = True
                    break

                ent_info_kor = (ent["kor"]["text"], ent["kor"]["start"], ent["kor"]["end"])
                ent_info_han = (ent["han"]["text"], ent["han"]["start"], ent["han"]["end"])
                full_ent_info_kor = (ent["kor"]["text"], ent["kor"]["start"], ent["kor"]["end"], start_kor, end_kor)
                full_ent_info_han = (ent["han"]["text"], ent["han"]["start"], ent["han"]["end"], start_han, end_han)

                if ent["kor"]["coref_type"]:
                    if ent["kor"]["coref_type"] in coref_dict_kor:
                        coref_idx = coref_dict_kor[ent["kor"]["coref_type"]]
                        ent2idx_kor[ent_info_kor] = coref_idx
                        entities_kor[coref_idx].append(full_ent_info_kor)
                    else:
                        coref_dict_kor[ent["kor"]["coref_type"]] = ent_idx_kor
                        ent2idx_kor[ent_info_kor] = ent_idx_kor
                        entities_kor.append([full_ent_info_kor])
                        ent_idx_kor += 1
                else:
                    ent2idx_kor[ent_info_kor] = ent_idx_kor
                    entities_kor.append([full_ent_info_kor])
                    ent_idx_kor += 1

                if ent["han"]["coref_type"]:
                    if ent["han"]["coref_type"] in coref_dict_han:
                        coref_idx = coref_dict_han[ent["han"]["coref_type"]]
                        ent2idx_han[ent_info_han] = coref_idx
                        entities_han[coref_idx].append(full_ent_info_han)
                    else:
                        coref_dict_han[ent["han"]["coref_type"]] = ent_idx_han
                        ent2idx_han[ent_info_han] = ent_idx_han
                        entities_han.append([full_ent_info_han])
                        ent_idx_han += 1
                else:
                    ent2idx_han[ent_info_han] = ent_idx_han
                    entities_han.append([full_ent_info_han])
                    ent_idx_han += 1

            if not long_seq:
                remaining_text_kor = ex["text"]["kor"][prev_idx_kor:]
                remaining_text_han = ex["text"]["han"][prev_idx_han:]
                input_tokens_kor += self.tokenizer_kor.tokenize(remaining_text_kor)
                input_tokens_han += self.tokenizer_han.tokenize(remaining_text_han)
            input_tokens_kor = input_tokens_kor[:self.args.max_seq_length - 1]
            input_tokens_han = input_tokens_han[:self.args.max_seq_length - 1]
            input_tokens_kor += [sep_token_kor]
            input_tokens_han += [sep_token_han]
            input_ids_kor = self.tokenizer_kor.convert_tokens_to_ids(input_tokens_kor)
            input_ids_han = self.tokenizer_han.convert_tokens_to_ids(input_tokens_han)

            # Pad to max length
            input_ids_kor += [self.config_kor.pad_token_id] * (self.args.max_seq_length - len(input_ids_kor))
            input_ids_han += [self.config_han.pad_token_id] * (self.args.max_seq_length - len(input_ids_han))
            assert len(input_ids_kor) == len(input_ids_han) == self.args.max_seq_length

            ### entity masks & NERs
            ent_pos_kor, ent_pos_han = [], []
            for ent in entities_kor:
                ent_pos_kor.append([])
                for ment in ent:
                    token_start, token_end = ment[3], ment[4]
                    ent_pos_kor[-1].append((token_start, token_end))
            for ent in entities_han:
                ent_pos_han.append([])
                for ment in ent:
                    token_start, token_end = ment[3], ment[4]
                    ent_pos_han[-1].append((token_start, token_end))

            # debug
            for ent_k, ent_h in zip(ent_pos_kor, ent_pos_han):
                assert len(ent_k) == len(ent_h)
                # print(json_file)
                # pprint.pprint(ex["entity"])
                # print(entities_kor)
                # print(entities_han)
                # break


            ### labels ###
            labels = torch.zeros((len(entities_kor), len(entities_kor), self.config_kor.num_labels), dtype=torch.float32)
            for relation in ex["relation"]:
                s1, o1 = relation["kor"]['subject_entity'], relation["kor"]['object_entity']
                s2, o2 = relation["han"]['subject_entity'], relation["han"]['object_entity']
                h_idx = ent2idx_kor.get((s1["text"], s1["start"], s1["end"]), None)
                t_idx = ent2idx_kor.get((o1["text"], o1["start"], o1["end"]), None)
                h_idx2 = ent2idx_han.get((s2["text"], s2["start"], s2["end"]), None)
                t_idx2 = ent2idx_han.get((o2["text"], o2["start"], o2["end"]), None)
                if h_idx is None or t_idx is None:
                    num_filtered_labels += 1
                    continue

                # TODO: idx has to match across languages, otherwise the label won't be universal.
                # if h_idx != h_idx2 or t_idx != t_idx2:
                #     import pdb; pdb.set_trace()
                # assert h_idx == h_idx2 and t_idx == t_idx2

                # debugging
                if not (h_idx == h_idx2 and t_idx == t_idx2):
                    # print(f"fname: {json_file}")
                    # pprint.pprint(relation)
                    N_data_problems += 1
                    continue

                r_idx = self.label_map[relation["kor"]["label"]]
                labels[h_idx, t_idx, r_idx] = 1

            for h in range(len(entities_kor)):
                for t in range(len(entities_kor)):
                    if torch.all(labels[h][t] == 0):
                        labels[h][t][0] = 1

            self.features.append({
                "input_ids_kor": input_ids_kor,
                "input_ids_han": input_ids_han,
                "ent_pos_kor": ent_pos_kor,
                "ent_pos_han": ent_pos_han,
                "labels": labels,
                "entities_kor": entities_kor,
                "entities_han": entities_han,
                "text_kor": ex["text"]["kor"],
                "text_han": ex["text"]["han"]
            })

            # self.features.append({
            #     "input_ids_kor": input_ids_kor,
            #     "input_ids_han": input_ids_han,
            #     "ent_pos_kor": ent_pos_kor,
            #     "ent_pos_han": ent_pos_han,
            #     "labels": labels
            # })

        print(f"# problems in (h_idx == h_idx2 and t_idx == t_idx2) : {N_data_problems}")

        logging.info(f"# of empty entity examples filtered: {num_empty_entity_examples}")
        logging.info(f"# of empty label examples filtered: {num_empty_label_examples}")
        logging.info(f"# of beyond-truncated-text labels filtered: {num_filtered_labels}")
        logging.info(f"Saving features to {self.save_path}")
        torch.save(self.features, self.save_path)

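    # Feature sketch: each cached example is a dict; for a document with E
    # clustered entities (coreferent mentions share one index) the fields are
    #     input_ids_kor / input_ids_han : list[int] of length max_seq_length
    #     ent_pos_kor / ent_pos_han     : E lists of (token_start, token_end) mention spans
    #     labels                        : FloatTensor of shape (E, E, num_labels), multi-hot,
    #                                     with index 0 set when an entity pair has no relation
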
    def collate_fn(self, samples):
        input_ids_kor = [x["input_ids_kor"] for x in samples]
        input_ids_han = [x["input_ids_han"] for x in samples]
        ent_pos_kor = [x["ent_pos_kor"] for x in samples]
        ent_pos_han = [x["ent_pos_han"] for x in samples]
        labels = [x["labels"].view(-1, self.config_kor.num_labels) for x in samples]

        input_ids_kor = torch.tensor(input_ids_kor, dtype=torch.long)
        input_ids_han = torch.tensor(input_ids_han, dtype=torch.long)
        labels = torch.cat(labels, dim=0)

        if not self.args.do_analysis:
            return {"input_ids_kor": input_ids_kor,
                    "input_ids_han": input_ids_han,
                    "ent_pos_kor": ent_pos_kor,
                    "ent_pos_han": ent_pos_han,
                    "labels": labels}

        elif self.args.do_analysis:

            entities_kor = [x["entities_kor"] for x in samples]
            entities_han = [x["entities_han"] for x in samples]
            text_kor = [x["text_kor"] for x in samples]
            text_han = [x["text_han"] for x in samples]

            return {"input_ids_kor": input_ids_kor,
                    "input_ids_han": input_ids_han,
                    "ent_pos_kor": ent_pos_kor,
                    "ent_pos_han": ent_pos_han,
                    "labels": labels,
                    "entities_kor": entities_kor,
                    "entities_han": entities_han,
                    "text_kor": text_kor,
                    "text_han": text_han
                    }


    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx]

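# Batch sketch for JointDataset.collate_fn: for a batch of B documents with
# E_1..E_B entities each, "input_ids_kor"/"input_ids_han" are LongTensors of
# shape (B, max_seq_length), "ent_pos_kor"/"ent_pos_han" remain nested Python
# lists, and "labels" concatenates the per-document (E_i * E_i, num_labels)
# matrices into a single (sum of E_i^2, num_labels) tensor.
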
class KoreanDataset(Dataset):

    def __init__(self, args, config, tokenizer, split="train"):
        self.args = args
        self.config = config
        self.tokenizer = tokenizer
        self.split = split
        self.features = []

        # self.word_tokenizer = Okt()

        self.save_dir = osp.join(args.data_dir, args.language)
        self.save_path = osp.join(self.save_dir, f"{args.model_type}_{split}.pt")
        os.makedirs(self.save_dir, exist_ok=True)

        map_dir = '/'.join(args.data_dir.split('/')[:-1])

        with open(osp.join(map_dir, "ner_map.json")) as f:
            self.ner_map = json.load(f)
        with open(osp.join(map_dir, "label_map.json")) as f:
            self.label_map = json.load(f)

        self.load_and_cache_examples()

    def load_and_cache_examples(self):
        if osp.exists(self.save_path):
            logging.info(f"Loading features from {self.save_path}")
            self.features = torch.load(self.save_path)
            return

        cls_token = self.tokenizer.cls_token
        sep_token = self.tokenizer.sep_token
        num_special_tokens = 2
        num_empty_entity_examples = 0
        num_empty_label_examples = 0
        num_filtered_labels = 0

        logging.info(f"Creating features from {self.args.data_dir}")
        rootdir = osp.join(self.args.data_dir, f"{self.split}")
        # print(f"Current directory: {rootdir}")

        for json_file in tqdm(os.listdir(rootdir), desc="Converting examples to features"):
            with open(osp.join(rootdir, json_file), encoding='utf-8') as f:
                ex = json.load(f)

            if len(ex["entity"]) == 0:
                num_empty_entity_examples += 1
                continue

            if len(ex["relation"]) == 0:
                num_empty_label_examples += 1
                continue

            ### Tokenize text & cluster entity mentions ###
            entities = []    # list of lists clustering same entity mentions
            coref_dict = {}  # { coref_type: entity_idx } -> will be used to cluster mentions
            ent2idx = {}     # { info: entity_idx } -> map entity to idx
            ent_idx = 0      # unique entity idx
            prev_idx = 1     # skip cls_token idx
            input_tokens = [cls_token]
            long_seq = False

            for ent in ex["entity"]:
                ent = ent['kor']
                if ent["type"] == "START" or ent["text"] == "" or ent["text"] == " ":
                    continue
                # when tokenizing, make note of subword idxes
                prev_text = ex["text"]["kor"][prev_idx:ent["start"]]
                prev_tokens = self.tokenizer.tokenize(prev_text)
                input_tokens += prev_tokens
                start = len(input_tokens)
                ent_text = ex["text"]["kor"][ent["start"]:ent["end"]]
                ent_tokens = self.tokenizer.tokenize(ent_text)
                if self.args.mark_entities:
                    ent_tokens = ["*"] + ent_tokens + ["*"]
                input_tokens += ent_tokens
                end = len(input_tokens)
                prev_idx = ent["end"]

                # Skip entity mentions that appear beyond the truncated text
                if (start > self.args.max_seq_length-num_special_tokens or
                        end > self.args.max_seq_length-num_special_tokens):
                    long_seq = True
                    break

                # this tuple will be used to identify entity
                ent_info = (ent["text"], ent["start"], ent["end"], ent["type"])
                full_ent_info = (ent["text"], ent["start"], ent["end"], start, end, ent["type"])

                if ent["coref_type"]:
                    if ent["coref_type"] in coref_dict:
                        coref_idx = coref_dict[ent["coref_type"]]
                        ent2idx[ent_info] = coref_idx
                        entities[coref_idx].append(full_ent_info)
                    else:
                        coref_dict[ent["coref_type"]] = ent_idx
                        ent2idx[ent_info] = ent_idx
                        entities.append([full_ent_info])
                        ent_idx += 1
                else:
                    ent2idx[ent_info] = ent_idx
                    entities.append([full_ent_info])
                    ent_idx += 1

            if not long_seq:
                remaining_text = ex["text"]["kor"][prev_idx:]
                input_tokens += self.tokenizer.tokenize(remaining_text)
            input_tokens = input_tokens[:self.args.max_seq_length - 1]  # truncation
            input_tokens += [sep_token]
            input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)

            # Pad to max length to enable sparse attention in bigbird
            input_ids += [self.config.pad_token_id] * (self.args.max_seq_length - len(input_ids))
            assert len(input_ids) == self.args.max_seq_length

            ### entity masks & NERs
            ent_pos, ent_ner = [], []
            for ent in entities:
                ent_pos.append([])
                # ent_ner.append([])
                for ment in ent:
                    token_start, token_end = ment[3], ment[4]
                    ent_pos[-1].append((token_start, token_end))
                    # ent_ner[-1].append(ment[-1])

            # ent_masks, ent_ners = [], []
            # for ent in entities:
            #     ent_mask = np.zeros(len(input_ids), dtype=np.float32)
            #     ent_ner = np.zeros(len(input_ids), dtype=np.float32)

            #     for ment in ent:
            #         start, end = ment[3], ment[4]
            #         # Skip entity mentions that appear beyond the truncated text
            #         if (start > self.args.max_seq_length-num_special_tokens or
            #                 end > self.args.max_seq_length-num_special_tokens):
            #             continue
            #         ent_mask[start:end] = 1
            #         ent_ner[start:end] = self.ner_map[ment[5]]

            #     assert ent_mask.sum() != 0

            #     ent_masks.append(ent_mask)
            #     ent_ners.append(ent_ner)

            # ent_masks = np.stack(ent_masks, axis=0)
            # ent_ners = np.stack(ent_ners, axis=0)

            ### labels ###
            labels = torch.zeros((len(entities), len(entities), self.config.num_labels), dtype=torch.float32)
            for relation in ex["relation"]:
                relation = relation['kor']
                s, o = relation['subject_entity'], relation['object_entity']
                h_idx = ent2idx.get((s["text"], s["start"], s["end"], s["type"]), None)
                t_idx = ent2idx.get((o["text"], o["start"], o["end"], o["type"]), None)
                if h_idx is None or t_idx is None:
                    num_filtered_labels += 1
                    continue
                r_idx = self.label_map[relation["label"]]
                labels[h_idx, t_idx, r_idx] = 1

            for h in range(len(entities)):
                for t in range(len(entities)):
                    if torch.all(labels[h][t] == 0):
                        labels[h][t][0] = 1

            ### label mask ###
            # label_mask = np.ones((len(entities), len(entities)), dtype='bool')
            # np.fill_diagonal(label_mask, 0) # ignore diagonals

            # TODO: normalize ent_masks (test normalization vs. not)
            # ent_masks = ent_masks / np.expand_dims(ent_masks.sum(1), axis=1)

            self.features.append({
                "input_ids": input_ids,
                "ent_pos": ent_pos,
                "labels": labels,
            })

            # self.features.append({
            #     "input_ids": input_ids,
            #     "ent_masks": ent_masks,
            #     "ent_ners": ent_ners,
            #     "labels": labels,
            #     "label_mask": label_mask
            # })

        logging.info(f"# of empty entity examples filtered: {num_empty_entity_examples}")
        logging.info(f"# of empty label examples filtered: {num_empty_label_examples}")
        logging.info(f"# of beyond-truncated-text labels filtered: {num_filtered_labels}")
        logging.info(f"Saving features to {self.save_path}")
        torch.save(self.features, self.save_path)

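    # Span bookkeeping sketch: with mark_entities=True each mention is wrapped in
    # literal "*" tokens, so the token sequence looks schematically like
    #     [CLS] ... * mention subwords * ... [SEP]
    # and the (token_start, token_end) pairs stored in ent_pos index into this
    # marked, truncated sequence (the asterisks are included in the span).
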
    def collate_fn(self, samples):
        input_ids = [x["input_ids"] for x in samples]

        ent_pos = [x["ent_pos"] for x in samples]
        # max_ent_len = max([len(x["ent_pos"]) for x in samples])
        # ent_masks = [F.pad(torch.from_numpy(x["ent_masks"]), \
        #     (0, 0, 0, max_ent_len-x["ent_masks"].shape[0])) for x in samples]
        # ent_ners = [F.pad(torch.from_numpy(x["ent_ners"]), \
        #     (0, 0, 0, max_ent_len-x["ent_ners"].shape[0])) for x in samples]

        labels = [x["labels"].view(-1, self.config.num_labels) for x in samples]
        # labels = [F.pad(torch.from_numpy(x["labels"]), \
        #     (0, 0, 0, max_ent_len-x["labels"].shape[0], 0, max_ent_len-x["labels"].shape[1]), value=-100) for x in samples]
        # label_mask = [F.pad(torch.from_numpy(x["label_mask"]), \
        #     (0, max_ent_len-x["label_mask"].shape[0], 0, max_ent_len-x["label_mask"].shape[1])) for x in samples]

        input_ids = torch.tensor(input_ids, dtype=torch.long)
        # ent_masks = torch.stack(ent_masks, dim=0)
        labels = torch.cat(labels, dim=0)
        # labels = torch.stack(labels, dim=0)
        # label_mask = torch.stack(label_mask, dim=0)

        return {"input_ids": input_ids,
                "ent_pos": ent_pos,
                # "ent_masks": ent_masks,
                # "ent_ners": ent_ners,
                "labels": labels,
                # "label_mask": label_mask,
                }

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx]


class HanjaDataset(Dataset):

    def __init__(self, args, config, tokenizer, split="train"):
        self.args = args
        self.config = config
        self.tokenizer = tokenizer
        self.split = split
        self.features = []

        self.save_dir = osp.join(args.data_dir, args.language)
        self.save_path = osp.join(self.save_dir, f"{args.model_type}_{split}.pt")
        os.makedirs(self.save_dir, exist_ok=True)

        map_dir = '/'.join(args.data_dir.split('/')[:-1])

        with open(osp.join(map_dir, "ner_map.json")) as f:
            self.ner_map = json.load(f)
        with open(osp.join(map_dir, "label_map.json")) as f:
            self.label_map = json.load(f)

        self.load_and_cache_examples()

    def load_and_cache_examples(self):
        if osp.exists(self.save_path):
            logging.info(f"Loading features from {self.save_path}")
            self.features = torch.load(self.save_path)
            return

        cls_token = self.tokenizer.cls_token
        sep_token = self.tokenizer.sep_token
        num_special_tokens = 2
        num_empty_entity_examples = 0
        num_empty_label_examples = 0
        num_filtered_labels = 0

        logging.info(f"Creating features from {self.args.data_dir}")
        rootdir = osp.join(self.args.data_dir, f"{self.split}")
        # print(f"Current directory: {rootdir}")

        for json_file in tqdm(os.listdir(rootdir), desc="Converting examples to features"):
            with open(osp.join(rootdir, json_file), encoding='utf-8') as f:
                ex = json.load(f)

            if len(ex["entity"]) == 0:
                num_empty_entity_examples += 1
                continue

            if len(ex["relation"]) == 0:
                num_empty_label_examples += 1
                continue

            ### Tokenize text & cluster entity mentions ###
            entities = []    # list of lists clustering same entity mentions
            coref_dict = {}  # { coref_type: entity_idx } -> will be used to cluster mentions
            ent2idx = {}     # { info: entity_idx } -> map entity to idx
            ent_idx = 0      # unique entity idx
            prev_idx = 1     # skip cls_token idx
            input_tokens = [cls_token]
            long_seq = False

            for ent in ex["entity"]:
                ent = ent['han']
                if ent["type"] == "START" or ent["text"] == "" or ent["text"] == " ":
                    continue
                # when tokenizing, make note of subword idxes
                prev_text = ex["text"]['han'][prev_idx:ent["start"]]
                prev_tokens = self.tokenizer.tokenize(prev_text)
                input_tokens += prev_tokens
                start = len(input_tokens)
                ent_text = ex["text"]['han'][ent["start"]:ent["end"]]
                ent_tokens = self.tokenizer.tokenize(ent_text)
                if self.args.mark_entities:
                    ent_tokens = ["*"] + ent_tokens + ["*"]
                input_tokens += ent_tokens
                end = len(input_tokens)
                prev_idx = ent["end"]

                # Skip entity mentions that appear beyond the truncated text
                if (start > self.args.max_seq_length-num_special_tokens or
                        end > self.args.max_seq_length-num_special_tokens):
                    long_seq = True
                    break

                # this tuple will be used to identify entity
                ent_info = (ent["text"], ent["start"], ent["end"], ent["type"])
                full_ent_info = (ent["text"], ent["start"], ent["end"], start, end, ent["type"])

                if ent["coref_type"]:
                    if ent["coref_type"] in coref_dict:
                        coref_idx = coref_dict[ent["coref_type"]]
                        ent2idx[ent_info] = coref_idx
                        entities[coref_idx].append(full_ent_info)
                    else:
                        coref_dict[ent["coref_type"]] = ent_idx
                        ent2idx[ent_info] = ent_idx
                        entities.append([full_ent_info])
                        ent_idx += 1
                else:
                    ent2idx[ent_info] = ent_idx
                    entities.append([full_ent_info])
                    ent_idx += 1

            if not long_seq:
                remaining_text = ex["text"]['han'][prev_idx:]
                input_tokens += self.tokenizer.tokenize(remaining_text)
            input_tokens = input_tokens[:self.args.max_seq_length - 1]  # truncation
            input_tokens += [sep_token]
            input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)

            # Pad to max length to enable sparse attention in bigbird
            input_ids += [self.config.pad_token_id] * (self.args.max_seq_length - len(input_ids))
            assert len(input_ids) == self.args.max_seq_length

            ### entity masks & NERs
            ent_pos, ent_ner = [], []
            for ent in entities:
                ent_pos.append([])
                # ent_ner.append([])
                for ment in ent:
                    token_start, token_end = ment[3], ment[4]
                    ent_pos[-1].append((token_start, token_end))
                    # ent_ner[-1].append(ment[-1])

            # ent_masks, ent_ners = [], []
            # for ent in entities:
            #     ent_mask = np.zeros(len(input_ids), dtype=np.float32)
            #     ent_ner = np.zeros(len(input_ids), dtype=np.float32)

            #     for ment in ent:
            #         start, end = ment[3], ment[4]
            #         # Skip entity mentions that appear beyond the truncated text
            #         if (start > self.args.max_seq_length-num_special_tokens or
            #                 end > self.args.max_seq_length-num_special_tokens):
            #             continue
            #         ent_mask[start:end] = 1
            #         ent_ner[start:end] = self.ner_map[ment[5]]

            #     assert ent_mask.sum() != 0

            #     ent_masks.append(ent_mask)
            #     ent_ners.append(ent_ner)

            # ent_masks = np.stack(ent_masks, axis=0)
            # ent_ners = np.stack(ent_ners, axis=0)

            ### labels ###
            labels = torch.zeros((len(entities), len(entities), self.config.num_labels), dtype=torch.float32)
            for relation in ex["relation"]:
                r_idx = self.label_map[relation["label"]]
                relation = relation['han']
                s, o = relation['subject_entity'], relation['object_entity']
                h_idx = ent2idx.get((s["text"], s["start"], s["end"], s["type"]), None)
                t_idx = ent2idx.get((o["text"], o["start"], o["end"], o["type"]), None)
                if h_idx is None or t_idx is None:
                    num_filtered_labels += 1
                    continue
                labels[h_idx, t_idx, r_idx] = 1

            for h in range(len(entities)):
                for t in range(len(entities)):
                    if torch.all(labels[h][t] == 0):
                        labels[h][t][0] = 1

            ### label mask ###
            # label_mask = np.ones((len(entities), len(entities)), dtype='bool')
            # np.fill_diagonal(label_mask, 0) # ignore diagonals

            # TODO: normalize ent_masks (test normalization vs. not)
            # ent_masks = ent_masks / np.expand_dims(ent_masks.sum(1), axis=1)

            self.features.append({
                "input_ids": input_ids,
                "ent_pos": ent_pos,
                "labels": labels,
            })

            # self.features.append({
            #     "input_ids": input_ids,
            #     "ent_masks": ent_masks,
            #     "ent_ners": ent_ners,
            #     "labels": labels,
            #     "label_mask": label_mask
            # })

        logging.info(f"# of empty entity examples filtered: {num_empty_entity_examples}")
        logging.info(f"# of empty label examples filtered: {num_empty_label_examples}")
        logging.info(f"# of beyond-truncated-text labels filtered: {num_filtered_labels}")
        logging.info(f"Saving features to {self.save_path}")
        torch.save(self.features, self.save_path)

    def collate_fn(self, samples):
        input_ids = [x["input_ids"] for x in samples]

        ent_pos = [x["ent_pos"] for x in samples]
        # max_ent_len = max([len(x["ent_pos"]) for x in samples])
        # ent_masks = [F.pad(torch.from_numpy(x["ent_masks"]), \
        #     (0, 0, 0, max_ent_len-x["ent_masks"].shape[0])) for x in samples]
        # ent_ners = [F.pad(torch.from_numpy(x["ent_ners"]), \
        #     (0, 0, 0, max_ent_len-x["ent_ners"].shape[0])) for x in samples]

        labels = [x["labels"].view(-1, self.config.num_labels) for x in samples]
        # labels = [F.pad(torch.from_numpy(x["labels"]), \
        #     (0, 0, 0, max_ent_len-x["labels"].shape[0], 0, max_ent_len-x["labels"].shape[1]), value=-100) for x in samples]
        # label_mask = [F.pad(torch.from_numpy(x["label_mask"]), \
        #     (0, max_ent_len-x["label_mask"].shape[0], 0, max_ent_len-x["label_mask"].shape[1])) for x in samples]

        input_ids = torch.tensor(input_ids, dtype=torch.long)
        # ent_masks = torch.stack(ent_masks, dim=0)
        labels = torch.cat(labels, dim=0)
        # labels = torch.stack(labels, dim=0)
        # label_mask = torch.stack(label_mask, dim=0)

        return {"input_ids": input_ids,
                "ent_pos": ent_pos,
                # "ent_masks": ent_masks,
                # "ent_ners": ent_ners,
                "labels": labels,
                # "label_mask": label_mask,
                }

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx]
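

# Inspection sketch: once a split has been featurized, the cached file can be
# examined directly (path shown with placeholder data_dir/model_type values):
#     feats = torch.load("data/korean/bert_train.pt")
#     len(feats)        # number of documents kept after filtering
#     feats[0].keys()   # dict_keys(['input_ids', 'ent_pos', 'labels'])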