gabrielaltay committed on
Commit
468ebd1
1 Parent(s): b517279

upload hubscripts/meddocan_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. meddocan.py +249 -0
meddocan.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ A dataset loading script for the MEDDOCAN corpus.
18
+ The MEDDOCAN dataset is a manually annotated collection of clinical case
19
+ reports derived from the Spanish Clinical Case Corpus (SPACCC). It was designed
20
+ for the Medical Document Anonymization Track, the first community
21
+ challenge task specifically devoted to the anonymization of medical documents in Spanish
22
+ """
23
+
24
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
from .bigbiohub import brat_parse_to_bigbio_kb
from .bigbiohub import kb_features
from .bigbiohub import parse_brat_file
33
+
34
+ _LANGUAGES = ['Spanish']
35
+ _PUBMED = False
36
+ _LOCAL = False
37
+ _CITATION = """\
38
+ @inproceedings{marimon2019automatic,
39
+ title={Automatic De-identification of Medical Texts in Spanish: the MEDDOCAN Track, Corpus, Guidelines, Methods and Evaluation of Results.},
40
+ author={Marimon, Montserrat and Gonzalez-Agirre, Aitor and Intxaurrondo, Ander and Rodriguez, Heidy and Martin, Jose Lopez and Villegas, Marta and Krallinger, Martin},
41
+ booktitle={IberLEF@ SEPLN},
42
+ pages={618--638},
43
+ year={2019}
44
+ }
45
+ """
46
+
47
+ _DATASETNAME = "meddocan"
48
+ _DISPLAYNAME = "MEDDOCAN"
49
+
50
+ _DESCRIPTION = """\
51
+ MEDDOCAN: Medical Document Anonymization Track
52
+
53
+ This dataset is designed for the MEDDOCAN task, sponsored by Plan de Impulso de las Tecnologías del Lenguaje.
54
+
55
+ It is a manually classified collection of 1,000 clinical case reports derived from the \
56
+ Spanish Clinical Case Corpus (SPACCC), enriched with PHI expressions.
57
+
58
+ The annotation of the entire set of entity mentions was carried out by experts annotators\
59
+ and it includes 29 entity types relevant for the annonymiation of medical documents.\
60
+ 22 of these annotation types are actually present in the corpus: TERRITORIO, FECHAS, \
61
+ EDAD_SUJETO_ASISTENCIA, NOMBRE_SUJETO_ASISTENCIA, NOMBRE_PERSONAL_SANITARIO, \
62
+ SEXO_SUJETO_ASISTENCIA, CALLE, PAIS, ID_SUJETO_ASISTENCIA, CORREO, ID_TITULACION_PERSONAL_SANITARIO,\
63
+ ID_ASEGURAMIENTO, HOSPITAL, FAMILIARES_SUJETO_ASISTENCIA, INSTITUCION, ID_CONTACTO ASISTENCIAL,\
64
+ NUMERO_TELEFONO, PROFESION, NUMERO_FAX, OTROS_SUJETO_ASISTENCIA, CENTRO_SALUD, ID_EMPLEO_PERSONAL_SANITARIO
65
+
66
+ For further information, please visit https://temu.bsc.es/meddocan/ or send an email to encargo-pln-life@bsc.es
67
+ """
68
+
69
+
70
+ _HOMEPAGE = "https://temu.bsc.es/meddocan/"
71
+
72
+ _LICENSE = 'Creative Commons Attribution 4.0 International'
73
+
74
+ _URLS = {
75
+ "meddocan": "https://zenodo.org/record/4279323/files/meddocan.zip?download=1",
76
+ }
77
+
78
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
79
+
80
+ _SOURCE_VERSION = "1.0.0"
81
+
82
+ _BIGBIO_VERSION = "1.0.0"
83
+
84
+
85
+ class MeddocanDataset(datasets.GeneratorBasedBuilder):
86
+ """Manually annotated collection of clinical case studies from Spanish medical publications."""
87
+
88
+ SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
89
+ BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
90
+
91
+ BUILDER_CONFIGS = [
92
+ BigBioConfig(
93
+ name="meddocan_source",
94
+ version=SOURCE_VERSION,
95
+ description="Meddocan source schema",
96
+ schema="source",
97
+ subset_id="meddocan",
98
+ ),
99
+ BigBioConfig(
100
+ name="meddocan_bigbio_kb",
101
+ version=BIGBIO_VERSION,
102
+ description="Meddocan BigBio schema",
103
+ schema="bigbio_kb",
104
+ subset_id="meddocan",
105
+ ),
106
+ ]
107
+
108
+ DEFAULT_CONFIG_NAME = "meddocan_source"
109
+
110
+ def _info(self) -> datasets.DatasetInfo:
111
+ if self.config.schema == "source":
112
+ features = datasets.Features(
113
+ {
114
+ "id": datasets.Value("string"),
115
+ "document_id": datasets.Value("string"),
116
+ "text": datasets.Value("string"),
117
+ # "labels": [datasets.Value("string")],
118
+ "text_bound_annotations": [ # T line in brat
119
+ {
120
+ "offsets": datasets.Sequence([datasets.Value("int32")]),
121
+ "text": datasets.Sequence(datasets.Value("string")),
122
+ "type": datasets.Value("string"),
123
+ "id": datasets.Value("string"),
124
+ }
125
+ ],
126
+ "events": [ # E line in brat
127
+ {
128
+ "trigger": datasets.Value("string"),
129
+ "id": datasets.Value("string"),
130
+ "type": datasets.Value("string"),
131
+ "arguments": datasets.Sequence(
132
+ {
133
+ "role": datasets.Value("string"),
134
+ "ref_id": datasets.Value("string"),
135
+ }
136
+ ),
137
+ }
138
+ ],
139
+ "relations": [ # R line in brat
140
+ {
141
+ "id": datasets.Value("string"),
142
+ "head": {
143
+ "ref_id": datasets.Value("string"),
144
+ "role": datasets.Value("string"),
145
+ },
146
+ "tail": {
147
+ "ref_id": datasets.Value("string"),
148
+ "role": datasets.Value("string"),
149
+ },
150
+ "type": datasets.Value("string"),
151
+ }
152
+ ],
153
+ "equivalences": [ # Equiv line in brat
154
+ {
155
+ "id": datasets.Value("string"),
156
+ "ref_ids": datasets.Sequence(datasets.Value("string")),
157
+ }
158
+ ],
159
+ "attributes": [ # M or A lines in brat
160
+ {
161
+ "id": datasets.Value("string"),
162
+ "type": datasets.Value("string"),
163
+ "ref_id": datasets.Value("string"),
164
+ "value": datasets.Value("string"),
165
+ }
166
+ ],
167
+ "normalizations": [ # N lines in brat
168
+ {
169
+ "id": datasets.Value("string"),
170
+ "type": datasets.Value("string"),
171
+ "ref_id": datasets.Value("string"),
172
+ "resource_name": datasets.Value("string"),
173
+ "cuid": datasets.Value("string"),
174
+ "text": datasets.Value("string"),
175
+ }
176
+ ],
177
+ },
178
+ )
179
+
180
+ elif self.config.schema == "bigbio_kb":
181
+ features = kb_features
182
+
183
+ return datasets.DatasetInfo(
184
+ description=_DESCRIPTION,
185
+ features=features,
186
+ homepage=_HOMEPAGE,
187
+ license=str(_LICENSE),
188
+ citation=_CITATION,
189
+ )
190
+
191
+ def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
192
+ """
193
+ Downloads/extracts the data to generate the train, validation and test splits.
194
+ Each split is created by instantiating a `datasets.SplitGenerator`, which will
195
+ call `this._generate_examples` with the keyword arguments in `gen_kwargs`.
196
+ """
197
+
198
+ data_dir = dl_manager.download_and_extract(_URLS["meddocan"])
199
+
200
+ return [
201
+ datasets.SplitGenerator(
202
+ name=datasets.Split.TRAIN,
203
+ gen_kwargs={
204
+ "filepath": Path(os.path.join(data_dir, "meddocan/train/brat")),
205
+ "split": "train",
206
+ },
207
+ ),
208
+ datasets.SplitGenerator(
209
+ name=datasets.Split.TEST,
210
+ gen_kwargs={
211
+ "filepath": Path(os.path.join(data_dir, "meddocan/test/brat")),
212
+ "split": "test",
213
+ },
214
+ ),
215
+ datasets.SplitGenerator(
216
+ name=datasets.Split.VALIDATION,
217
+ gen_kwargs={
218
+ "filepath": Path(os.path.join(data_dir, "meddocan/dev/brat")),
219
+ "split": "dev",
220
+ },
221
+ ),
222
+ ]
223
+
224
+ def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
225
+ """
226
+ This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
227
+ Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
228
+ """
229
+
230
+ txt_files = sorted(list(filepath.glob("*txt")))
231
+ # tsv_files = sorted(list(filepaths[1].glob("*tsv")))
232
+
233
+ if self.config.schema == "source":
234
+ for guid, txt_file in enumerate(txt_files):
235
+ example = parsing.parse_brat_file(txt_file)
236
+
237
+ example["id"] = str(guid)
238
+ yield guid, example
239
+
240
+ elif self.config.schema == "bigbio_kb":
241
+ for guid, txt_file in enumerate(txt_files):
242
+ example = parsing.brat_parse_to_bigbio_kb(
243
+ parsing.parse_brat_file(txt_file)
244
+ )
245
+ example["id"] = str(guid)
246
+ yield guid, example
247
+
248
+ else:
249
+ raise ValueError(f"Invalid config: {self.config.name}")