qanastek committed on
Commit
1b9eed4
1 Parent(s): d4f2789

Update CAS.py

Files changed (1)
  1. CAS.py +244 -59
CAS.py CHANGED
@@ -61,28 +61,67 @@ _LICENSE = 'Data User Agreement'
 
 class CAS(datasets.GeneratorBasedBuilder):
 
-    DEFAULT_CONFIG_NAME = "source"
+    DEFAULT_CONFIG_NAME = "pos_spec"
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="source", version="1.0.0", description="The CAS corpora"),
+        datasets.BuilderConfig(name="pos", version="1.0.0", description="The CAS corpora - POS Speculation task"),
+
+        datasets.BuilderConfig(name="cls", version="1.0.0", description="The CAS corpora - CLS Negation / Speculation task"),
+
+        datasets.BuilderConfig(name="ner_spec", version="1.0.0", description="The CAS corpora - NER Speculation task"),
+        datasets.BuilderConfig(name="ner_neg", version="1.0.0", description="The CAS corpora - NER Negation task"),
     ]
 
     def _info(self):
 
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "document_id": datasets.Value("string"),
-                "tokens": [datasets.Value("string")],
-                "lemmas": [datasets.Value("string")],
-                "pos_tags": [datasets.features.ClassLabel(
-                    names = ['VER:ppre', 'VER:infi', 'VER:impf', 'VER:simp', 'PUN', 'DET:POS', 'ADV', 'DET:ART', 'PRO:DEM', 'INT', 'VER:futu', 'VER:subp', 'VER:cond', 'VER:pper', 'KON', 'NAM', 'PRO:IND', 'VER:con', 'PRP', 'SYM', 'SENT', 'PUN:cit', 'VER:pres', 'PRP:det', 'PRO:REL', 'PRO:PER', 'VER:subi', 'ADJ', 'NUM', 'NOM', 'ABR'],
-                )],
-                "label": datasets.features.ClassLabel(
-                    names = ['negation', 'speculation'],
-                ),
-            }
-        )
+        if self.config.name.find("pos") != -1:
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "tokens": [datasets.Value("string")],
+                    "lemmas": [datasets.Value("string")],
+                    # "pos_tags": [datasets.Value("string")],
+                    "pos_tags": [datasets.features.ClassLabel(
+                        names = ['INT', 'PRO:DEM', 'VER:impf', 'VER:ppre', 'PRP:det', 'KON', 'VER:pper', 'PRP', 'PRO:IND', 'VER:simp', 'VER:con', 'SENT', 'VER:futu', 'PRO:PER', 'VER:infi', 'ADJ', 'NAM', 'NUM', 'PUN:cit', 'PRO:REL', 'VER:subi', 'ABR', 'NOM', 'VER:pres', 'DET:ART', 'VER:cond', 'VER:subp', 'DET:POS', 'ADV', 'SYM', 'PUN'],
+                    )],
+                }
+            )
+
+        elif self.config.name.find("cls") != -1:
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "tokens": [datasets.Value("string")],
+                    # "label": datasets.Value("string"),
+                    "label": datasets.features.ClassLabel(
+                        names = ['negation_speculation', 'speculation', 'neutral', 'negation'],
+                    ),
+                }
+            )
+
+        elif self.config.name.find("ner") != -1:
+
+            if self.config.name.find("_spec") != -1:
+                names = ['O', 'B_xcope_inc', 'I_xcope_inc']
+            elif self.config.name.find("_neg") != -1:
+                names = ['O', 'B_scope_neg', 'I_scope_neg']
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "tokens": [datasets.Value("string")],
+                    "lemmas": [datasets.Value("string")],
+                    # "ner_tags": [datasets.Value("string")],
+                    "ner_tags": [datasets.features.ClassLabel(
+                        names = names,
+                    )],
+                }
+            )
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
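
A minimal usage sketch for the configurations introduced above (assumptions: the loading script is called locally, a "train" split exists, and data_dir points at the folder holding the CAS text files; none of this is shown in the diff):

from datasets import load_dataset

# Hypothetical call: pick the NER negation configuration added by this commit.
ds = load_dataset("./CAS.py", name="ner_neg", data_dir="/path/to/CAS")

# "ner_tags" is declared as a sequence of ClassLabel, so integer ids map back to tag names.
print(ds["train"].features["ner_tags"].feature.names)  # ['O', 'B_scope_neg', 'I_scope_neg']
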
@@ -131,55 +170,201 @@ class CAS(datasets.GeneratorBasedBuilder):
 
         key = 0
 
-        for file in ["CAS_neg.txt", "CAS_spec.txt"]:

-            label = "negation" if "neg" in file else "speculation"
-            id_docs = []
-            id_words = []
-            words = []
-            lemmas = []
-            POS_tags = []

-            with open(os.path.join(datadir, file)) as f:

-                for line in f.readlines():

-                    if len(line.split("\t")) < 5:
                         continue

-                    id_doc, id_word, word, lemma, tag = line.split("\t")[0:5]
-
-                    id_docs.append(id_doc)
-                    id_words.append(id_word)
-                    words.append(word)
-                    lemmas.append(lemma)
-                    POS_tags.append(tag)
-
-            dic = {
-                "id_docs": np.array(list(map(int, id_docs))),
-                "id_words": id_words,
-                "words": words,
-                "lemmas": lemmas,
-                "POS_tags": POS_tags,
-            }
-
-            for doc_id in set(dic["id_docs"]):
-
-                indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
-                tokens = [dic["words"][id] for id in indexes]
-                text_lemmas = [dic["lemmas"][id] for id in indexes]
-                pos_tags = [dic["POS_tags"][id] for id in indexes]
-
-                all_res.append({
-                    "id": str(key),
-                    "document_id": str(doc_id),
-                    "tokens": tokens,
-                    "lemmas": text_lemmas,
-                    "pos_tags": pos_tags,
-                    "label": label,
-                })
-
-                key += 1
+        subset = self.config.name.split("_")[-1]
+
+        unique_id_doc = []
+
+        if self.config.name.find("ner") != -1:
+            docs = [f"CAS_{subset}.txt"]
+        else:
+            docs = ["CAS_neg.txt", "CAS_spec.txt"]
+
+        for file in docs:
+
+            filename = os.path.join(datadir, file)
+
+            if self.config.name.find("pos") != -1:
+
+                id_docs = []
+                id_words = []
+                words = []
+                lemmas = []
+                POS_tags = []
+
+                with open(filename) as f:
+
+                    for line in f.readlines():
+
+                        splitted = line.split("\t")
+
+                        if len(splitted) < 5:
+                            continue
+
+                        id_doc, id_word, word, lemma, tag = splitted[0:5]
+                        if len(splitted) >= 8:
+                            tag = splitted[6]
+
+                        if tag == "@card@":
+                            print(splitted)
+
+                        if word == "@card@":
+                            print(splitted)
+
+                        if lemma == "000" and tag == "@card@":
+                            tag = "NUM"
+                            word = "100 000"
+                            lemma = "100 000"
+                        elif lemma == "45" and tag == "@card@":
+                            tag = "NUM"
+
+                        # if id_doc in id_docs:
+                        # continue
+
+                        id_docs.append(id_doc)
+                        id_words.append(id_word)
+                        words.append(word)
+                        lemmas.append(lemma)
+                        POS_tags.append(tag)
+
+                dic = {
+                    "id_docs": np.array(list(map(int, id_docs))),
+                    "id_words": id_words,
+                    "words": words,
+                    "lemmas": lemmas,
+                    "POS_tags": POS_tags,
+                }
+
+                for doc_id in set(dic["id_docs"]):
+
+                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                    tokens = [dic["words"][id] for id in indexes]
+                    text_lemmas = [dic["lemmas"][id] for id in indexes]
+                    pos_tags = [dic["POS_tags"][id] for id in indexes]

+                    if doc_id not in unique_id_doc:

+                        all_res.append({
+                            "id": str(doc_id),
+                            "document_id": doc_id,
+                            "tokens": tokens,
+                            "lemmas": text_lemmas,
+                            "pos_tags": pos_tags,
+                        })
+                        unique_id_doc.append(doc_id)

+                    # key += 1

+            elif self.config.name.find("ner") != -1:
+
+                id_docs = []
+                id_words = []
+                words = []
+                lemmas = []
+                ner_tags = []
+
+                with open(filename) as f:
+
+                    for line in f.readlines():
+
+                        if len(line.split("\t")) < 5:
+                            continue
+
+                        id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
+                        tag = line.replace("\n","").split("\t")[-1]
+
+                        if tag == "***" or tag == "_":
+                            tag = "O"
+                        elif tag == "I_xcope_inc_":
+                            tag = "I_xcope_inc"
+                        # elif tag == "v":
+                        # tag = "I_scope_spec"
+                        # elif tag == "z":
+                        # tag = "O"
+
+                        id_docs.append(id_doc)
+                        id_words.append(id_word)
+                        words.append(word)
+                        lemmas.append(lemma)
+                        ner_tags.append(tag)
+
+                dic = {
+                    "id_docs": np.array(list(map(int, id_docs))),
+                    "id_words": id_words,
+                    "words": words,
+                    "lemmas": lemmas,
+                    "ner_tags": ner_tags,
+                }
+
+                for doc_id in set(dic["id_docs"]):
+
+                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                    tokens = [dic["words"][id] for id in indexes]
+                    text_lemmas = [dic["lemmas"][id] for id in indexes]
+                    ner_tags = [dic["ner_tags"][id] for id in indexes]
+
+                    all_res.append({
+                        "id": key,
+                        "document_id": doc_id,
+                        "tokens": tokens,
+                        "lemmas": text_lemmas,
+                        "ner_tags": ner_tags,
+                    })
+
+                    key += 1
+
+            elif self.config.name.find("cls") != -1:
+
+                f_in = open(filename, "r")
+                conll = [
+                    [b.split("\t") for b in a.split("\n")]
+                    for a in f_in.read().split("\n\n")
+                ]
+                f_in.close()
+
+                classe = "negation" if filename.find("_neg") != -1 else "speculation"
+
+                for document in conll:
+
+                    if document == [""]:
+                        continue
+
+                    identifier = document[0][0]
+
+                    unique = list(set([w[-1] for w in document]))
+                    tokens = [sent[2] for sent in document if len(sent) > 1]
+
+                    if "***" in unique:
+                        l = "neutral"
+                    elif "_" in unique:
+                        l = classe
+
+                    if identifier in unique_id_doc and l == 'neutral':
                         continue

+                    elif identifier in unique_id_doc and l != 'neutral':
+
+                        index_l = unique_id_doc.index(identifier)
+
+                        if all_res[index_l]["label"] != "neutral":
+                            l = "negation_speculation"
+
+                        all_res[index_l] = {
+                            "id": str(identifier),
+                            "document_id": identifier,
+                            "tokens": tokens,
+                            "label": l,
+                        }
+
+                    else:
+
+                        all_res.append({
+                            "id": str(identifier),
+                            "document_id": identifier,
+                            "tokens": tokens,
+                            "label": l,
+                        })
+
+                    unique_id_doc.append(identifier)

         ids = [r["id"] for r in all_res]
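
For reference, the _generate_examples logic above treats each corpus file as blank-line-separated documents of tab-separated token lines, reading the document id, token id, surface form, lemma and POS tag from the first five columns and the negation/speculation scope label from the last one. The exact column layout of CAS_neg.txt / CAS_spec.txt is not shown in this commit, so the line below is fabricated purely to illustrate the positions the parser reads:

# Fabricated token line; only the field positions mirror the parsing code above.
line = "3\t12\tdouleur\tdouleur\tNOM\tB_scope_neg\n"

id_doc, id_word, word, lemma, tag = line.split("\t")[0:5]   # used by the pos / ner configs
scope_tag = line.replace("\n", "").split("\t")[-1]          # last column, e.g. "B_scope_neg", "_" or "***"
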