qanastek committed
Commit efb5260
1 parent: cb905ba

Update ESSAI.py

Files changed (1):
  1. ESSAI.py +218 -61

ESSAI.py CHANGED
@@ -41,28 +41,63 @@ _LICENSE = 'Data User Agreement'
 
 class ESSAI(datasets.GeneratorBasedBuilder):
 
-    DEFAULT_CONFIG_NAME = "source"
+    DEFAULT_CONFIG_NAME = "pos_spec"
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="source", version="1.0.0", description="The ESSAI corpora"),
+        datasets.BuilderConfig(name="pos", version="1.0.0", description="The ESSAI corpora - POS Speculation task"),
+
+        datasets.BuilderConfig(name="cls_spec", version="1.0.0", description="The ESSAI corpora - CLS Speculation task"),
+        datasets.BuilderConfig(name="cls_neg", version="1.0.0", description="The ESSAI corpora - CLS Negation task"),
+
+        datasets.BuilderConfig(name="ner_spec", version="1.0.0", description="The ESSAI corpora - NER Speculation task"),
+        datasets.BuilderConfig(name="ner_neg", version="1.0.0", description="The ESSAI corpora - NER Negation task"),
     ]
 
     def _info(self):
 
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "document_id": datasets.Value("string"),
-                "tokens": [datasets.Value("string")],
-                "lemmas": [datasets.Value("string")],
-                "pos_tags": [datasets.features.ClassLabel(
-                    names = ['VER:pper', 'VER:subi', 'VER:cond', 'INT', 'VER:infi', 'PUN:cit', 'ITAC', 'PUN', 'VER:ppre', 'VER:pres', 'PRO:REL', 'ADJ', 'VER:subp', 'NN', 'PREF', 'PRP', 'PRO:IND', 'PRO:POS', 'DET:POS', 'VER:futu', 'PRO:DEM', 'KON', 'DET:ART', 'VER:', 'PRP:det', 'PRO', 'FAG', 'NOM', 'SYM', 'VER:impf', 'CIT02-HM', 'SENT', 'Bayer', 'VER:simp', 'ADV', 'bayer', '@card@', 'PRO:PER', 'NUM', 'ABR', 'NAM'],
-                )],
-                "label": datasets.features.ClassLabel(
-                    names = ['negation', 'speculation'],
-                ),
-            }
-        )
+        if self.config.name.find("pos") != -1:
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "tokens": [datasets.Value("string")],
+                    "lemmas": [datasets.Value("string")],
+                    "pos_tags": [datasets.Value("string")],
+                    # "pos_tags": [datasets.features.ClassLabel(
+                    #     names = ['VER:pper', 'VER:subi', 'VER:cond', 'INT', 'VER:infi', 'PUN:cit', 'ITAC', 'PUN', 'VER:ppre', 'VER:pres', 'PRO:REL', 'ADJ', 'VER:subp', 'NN', 'PREF', 'PRP', 'PRO:IND', 'PRO:POS', 'DET:POS', 'VER:futu', 'PRO:DEM', 'KON', 'DET:ART', 'VER:', 'PRP:det', 'PRO', 'FAG', 'NOM', 'SYM', 'VER:impf', 'CIT02-HM', 'SENT', 'Bayer', 'VER:simp', 'ADV', 'bayer', '@card@', 'PRO:PER', 'NUM', 'ABR', 'NAM'],
+                    # )],
+                }
+            )
+
+        elif self.config.name.find("cls") != -1:
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "tokens": [datasets.Value("string")],
+                    "label": datasets.Value("string"),
+                    # "label": datasets.features.ClassLabel(
+                    #     names = ['VER:pper', 'VER:subi', 'VER:cond', 'INT', 'VER:infi', 'PUN:cit', 'ITAC', 'PUN', 'VER:ppre', 'VER:pres', 'PRO:REL', 'ADJ', 'VER:subp', 'NN', 'PREF', 'PRP', 'PRO:IND', 'PRO:POS', 'DET:POS', 'VER:futu', 'PRO:DEM', 'KON', 'DET:ART', 'VER:', 'PRP:det', 'PRO', 'FAG', 'NOM', 'SYM', 'VER:impf', 'CIT02-HM', 'SENT', 'Bayer', 'VER:simp', 'ADV', 'bayer', '@card@', 'PRO:PER', 'NUM', 'ABR', 'NAM'],
+                    # ),
+                }
+            )
+
+        elif self.config.name.find("ner") != -1:
+
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "tokens": [datasets.Value("string")],
+                    "lemmas": [datasets.Value("string")],
+                    "ner_tags": [datasets.Value("string")],
+                    # "ner_tags": [datasets.features.ClassLabel(
+                    #     names = ['VER:pper', 'VER:subi', 'VER:cond', 'INT', 'VER:infi', 'PUN:cit', 'ITAC', 'PUN', 'VER:ppre', 'VER:pres', 'PRO:REL', 'ADJ', 'VER:subp', 'NN', 'PREF', 'PRP', 'PRO:IND', 'PRO:POS', 'DET:POS', 'VER:futu', 'PRO:DEM', 'KON', 'DET:ART', 'VER:', 'PRP:det', 'PRO', 'FAG', 'NOM', 'SYM', 'VER:impf', 'CIT02-HM', 'SENT', 'Bayer', 'VER:simp', 'ADV', 'bayer', '@card@', 'PRO:PER', 'NUM', 'ABR', 'NAM'],
+                    # )],
+                }
+            )
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -111,56 +146,178 @@ class ESSAI(datasets.GeneratorBasedBuilder):
 
         key = 0
 
-        for file in ["ESSAI_neg.txt", "ESSAI_spec.txt"]:
-
-            label = "negation" if "neg" in file else "speculation"
-            id_docs = []
-            id_words = []
-            words = []
-            lemmas = []
-            POS_tags = []
-
-            with open(os.path.join(datadir, file)) as f:
-
-                for line in f.readlines():
-
-                    if len(line.split("\t")) < 5:
-                        continue
-
-                    id_doc, id_word, word, lemma, tag = line.split("\t")[0:5]
-
-                    id_docs.append(id_doc)
-                    id_words.append(id_word)
-                    words.append(word)
-                    lemmas.append(lemma)
-                    POS_tags.append(tag)
-
-            dic = {
-                "id_docs": np.array(list(map(int, id_docs))),
-                "id_words": id_words,
-                "words": words,
-                "lemmas": lemmas,
-                "POS_tags": POS_tags,
-            }
-
-            for doc_id in set(dic["id_docs"]):
-
-                indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
-                tokens = [dic["words"][id] for id in indexes]
-                text_lemmas = [dic["lemmas"][id] for id in indexes]
-                pos_tags = [dic["POS_tags"][id] for id in indexes]
-
-                all_res.append({
-                    "id": key,
-                    "document_id": doc_id,
-                    "tokens": tokens,
-                    "lemmas": text_lemmas,
-                    "pos_tags": pos_tags,
-                    "label": label,
-                })
-
-                key += 1
-
+        subset = self.config.name.split("_")[-1]
+
+        unique_id_doc = []
+
+        if self.config.name.find("pos") != -1:
+            docs = ["ESSAI_neg.txt", "ESSAI_spec.txt"]
+        else:
+            docs = [f"ESSAI_{subset}.txt"]
+
+        for file in docs:
+
+            filename = os.path.join(datadir, file)
+
+            if self.config.name.find("pos") != -1:
+
+                id_docs = []
+                id_words = []
+                words = []
+                lemmas = []
+                POS_tags = []
+
+                with open(filename) as f:
+
+                    for line in f.readlines():
+
+                        splitted = line.split("\t")
+
+                        if len(splitted) < 5:
+                            continue
+
+                        id_doc, id_word, word, lemma, tag = splitted[0:5]
+                        if len(splitted) >= 8:
+                            tag = splitted[6]
+
+                        if tag == "@card@":
+                            print(splitted)
+
+                        if lemma == "000" and tag == "@card@":
+                            tag = "NUM"
+                            word = "100 000"
+                            lemma = "100 000"
+                        elif lemma == "45" and tag == "@card@":
+                            tag = "NUM"
+
+                        # if id_doc in id_docs:
+                        #     continue
+
+                        id_docs.append(id_doc)
+                        id_words.append(id_word)
+                        words.append(word)
+                        lemmas.append(lemma)
+                        POS_tags.append(tag)
+
+                dic = {
+                    "id_docs": np.array(list(map(int, id_docs))),
+                    "id_words": id_words,
+                    "words": words,
+                    "lemmas": lemmas,
+                    "POS_tags": POS_tags,
+                }
+
+                for doc_id in set(dic["id_docs"]):
+
+                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                    tokens = [dic["words"][id] for id in indexes]
+                    text_lemmas = [dic["lemmas"][id] for id in indexes]
+                    pos_tags = [dic["POS_tags"][id] for id in indexes]
+
+                    if doc_id not in unique_id_doc:
+
+                        all_res.append({
+                            "id": str(doc_id),
+                            "document_id": doc_id,
+                            "tokens": tokens,
+                            "lemmas": text_lemmas,
+                            "pos_tags": pos_tags,
+                        })
+                        unique_id_doc.append(doc_id)
+
+                # key += 1
+
+            elif self.config.name.find("ner") != -1:
+
+                id_docs = []
+                id_words = []
+                words = []
+                lemmas = []
+                ner_tags = []
+
+                with open(filename) as f:
+
+                    for line in f.readlines():
+
+                        if len(line.split("\t")) < 5:
+                            continue
+
+                        id_doc, id_word, word, lemma, _ = line.split("\t")[0:5]
+                        tag = line.replace("\n", "").split("\t")[-1]
+                        if tag == "***" or tag == "_":
+                            tag = "O"
+                        elif tag == "v":
+                            tag = "I_scope_spec"
+                        elif tag == "z":
+                            tag = "O"
+
+                        id_docs.append(id_doc)
+                        id_words.append(id_word)
+                        words.append(word)
+                        lemmas.append(lemma)
+                        ner_tags.append(tag)
+
+                dic = {
+                    "id_docs": np.array(list(map(int, id_docs))),
+                    "id_words": id_words,
+                    "words": words,
+                    "lemmas": lemmas,
+                    "ner_tags": ner_tags,
+                }
+
+                for doc_id in set(dic["id_docs"]):
+
+                    indexes = np.argwhere(dic["id_docs"] == doc_id)[:, 0]
+                    tokens = [dic["words"][id] for id in indexes]
+                    text_lemmas = [dic["lemmas"][id] for id in indexes]
+                    ner_tags = [dic["ner_tags"][id] for id in indexes]
+
+                    all_res.append({
+                        "id": key,
+                        "document_id": doc_id,
+                        "tokens": tokens,
+                        "lemmas": text_lemmas,
+                        "ner_tags": ner_tags,
+                    })
+
+                    key += 1
+
+            elif self.config.name.find("cls") != -1:
+
+                f_in = open(filename, "r")
+                conll = [
+                    [b.split("\t") for b in a.split("\n")]
+                    for a in f_in.read().split("\n\n")
+                ]
+                f_in.close()
+
+                classe = "negation" if self.config.name.find("neg") != -1 else "speculation"
+
+                all_res = []
+
+                for document in conll:
+
+                    if document == [""]:
+                        continue
+
+                    identifier = document[0][0]
+
+                    unique = list(set([w[-1] for w in document]))
+                    # print(document)
+                    tokens = [sent[2] for sent in document if len(sent) > 1]
+
+                    if "***" in unique:
+                        l = "none"
+                    elif "_" in unique:
+                        l = classe
+
+                    all_res.append({
+                        "id": str(identifier),
+                        "document_id": identifier,
+                        "tokens": tokens,
+                        "label": l,
+                    })
+
         ids = [r["id"] for r in all_res]
 
         random.seed(4)
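Every branch of the rewritten _generate_examples assumes the same tab-separated line layout: at least five columns (sentence id, token id, token, lemma, POS tag), with the cue/scope annotation read from the last column. A minimal sketch of that per-line contract; the sample line below is invented for illustration, only the column layout is taken from the diff:

    # Hypothetical ESSAI-style line; real lines come from ESSAI_neg.txt / ESSAI_spec.txt.
    sample = "12\t3\tpatients\tpatient\tNOM\t_\t_\t***"

    splitted = sample.split("\t")
    if len(splitted) >= 5:
        id_doc, id_word, word, lemma, tag = splitted[0:5]
        annotation = splitted[-1]  # last column, used by the ner_* and cls_* branches
        print(id_doc, id_word, word, lemma, tag, annotation)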
 
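The cls_* branch collapses those per-token annotations into a single sentence-level label. A standalone sketch of that rule; the function name and the explicit None fallback are mine, while the "***" / "_" markers and the "none" label come from the committed code:

    def sentence_label(last_columns, classe="speculation"):
        # "***" marks a sentence without any cue; "_" marks an annotated one.
        # Sentences matching neither marker are left unlabelled in the
        # committed code, so this sketch returns None for them instead.
        unique = set(last_columns)
        if "***" in unique:
            return "none"
        if "_" in unique:
            return classe
        return None

    print(sentence_label(["***", "***"]))           # -> none
    print(sentence_label(["_", "_"], "negation"))   # -> negation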
 
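With the new configs in place, each task is selected by name at load time. A hedged usage sketch: the hub repository id "qanastek/ESSAI" is inferred from the committer's username, and the split name is assumed; neither is confirmed by this diff:

    from datasets import load_dataset

    # Hypothetical calls; the config names come from BUILDER_CONFIGS above.
    pos = load_dataset("qanastek/ESSAI", "pos")
    cls_spec = load_dataset("qanastek/ESSAI", "cls_spec")
    ner_neg = load_dataset("qanastek/ESSAI", "ner_neg")

    sample = pos["train"][0]  # the "train" split is also an assumption
    print(sample["tokens"][:5], sample["pos_tags"][:5])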