Languages: English
gabrielaltay committed
Commit 3a1ce81 · 1 Parent(s): 3bcd8b6

upload hubscripts/n2c2_2014_deid_hub.py to hub from bigbio repo

Files changed (1)
  1. n2c2_2014_deid.py +295 -0
n2c2_2014_deid.py ADDED
@@ -0,0 +1,295 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ A dataset loader for the n2c2 2014 De-identification & Heart Disease challenge.
+ https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/
+ The dataset consists of 3 archive files,
+ * 2014_training-PHI-Gold-Set1.tar.gz
+ * training-PHI-Gold-Set2.tar.gz
+ * testing-PHI-Gold-fixed.tar.gz
+ Each tar.gz contains a set of .xml files, one .xml per clinical report.
+ The file names follow a consistent pattern, with the first set of digits identifying the
+ patient and the last set of digits identifying the sequential record number,
+ i.e. XXX-YY.xml,
+ where XXX is the patient number and YY is the record number.
+ Example: 320-03.xml
+ This is the third (03) record for patient 320.
+ Each file has a root-level xml node which will contain a
+ <TEXT> node that holds the medical annotation text and a <TAGS> node containing
+ annotations for the document text.
+ The files comprising this dataset must be on the user's local machine
+ in a single directory that is passed to `datasets.load_dataset` via
+ the `data_dir` kwarg. This loader script will read the archive files
+ directly (i.e. the user should not uncompress, untar or unzip any of
+ the files). For example, the following directory structure could exist
+ on the user's local machine,
+ n2c2_2014
+ ├── 2014_training-PHI-Gold-Set1.tar.gz
+ ├── training-PHI-Gold-Set2.tar.gz
+ └── testing-PHI-Gold-fixed.tar.gz
+ Data Access
+ from https://www.i2b2.org/NLP/DataSets/Main.php
+ "As always, you must register AND submit a DUA for access. If you previously
+ accessed the data sets here on i2b2.org, you will need to set a new password
+ for your account on the Data Portal, but your original DUA will be retained."
+ Made in collaboration with @JoaoRacedo
+ """
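+
+ # A minimal usage sketch (not part of the committed script; the local path is an
+ # assumption): with the three archives placed in one directory, the loader can be
+ # invoked along these lines:
+ #
+ #     from datasets import load_dataset
+ #
+ #     dset = load_dataset(
+ #         "n2c2_2014_deid.py",
+ #         name="n2c2_2014_bigbio_kb",  # or "n2c2_2014_source"
+ #         data_dir="/path/to/n2c2_2014",
+ #     )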
+
+ import itertools as it
+ import os
+ import re
+ import tarfile
+ import xml.etree.ElementTree as et
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = False
+ _LOCAL = True
+ _CITATION = """\
+ @article{stubbs2015automated,
+     title = {Automated systems for the de-identification of longitudinal
+     clinical narratives: Overview of 2014 i2b2/UTHealth shared task Track 1},
+     journal = {Journal of Biomedical Informatics},
+     volume = {58},
+     pages = {S11-S19},
+     year = {2015},
+     issn = {1532-0464},
+     doi = {https://doi.org/10.1016/j.jbi.2015.06.007},
+     url = {https://www.sciencedirect.com/science/article/pii/S1532046415001173},
+     author = {Amber Stubbs and Christopher Kotfila and Özlem Uzuner}
+ }
+ """
+
+ _DATASETNAME = "n2c2_2014_deid"
+ _DISPLAYNAME = "n2c2 2014 De-identification"
+
+ _DESCRIPTION = """\
+ The 2014 i2b2/UTHealth Natural Language Processing (NLP) shared task featured two tracks.
+ The first of these was the de-identification track, focused on identifying protected health
+ information (PHI) in longitudinal clinical narratives.
+
+ TRACK 1: NER PHI\n
+ HIPAA requires that patient medical records have all identifying information removed in order to
+ protect patient privacy. There are 18 categories of Protected Health Information (PHI) identifiers of the
+ patient or of relatives, employers, or household members of the patient that must be removed in order
+ for a file to be considered de-identified.
+ In order to de-identify the records, each file has PHI marked up. All PHI has an
+ XML tag indicating its category and type, where applicable. For the purposes of this task,
+ the 18 HIPAA categories have been grouped into 6 main categories and 25 subcategories.
+ """
+
+ _HOMEPAGE = "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/"
+
+ _LICENSE = 'Data User Agreement'
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class N2C22014DeidDataset(datasets.GeneratorBasedBuilder):
+     """n2c2 2014 Deidentification Challenge"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="n2c2_2014_source",
+             version=SOURCE_VERSION,
+             description="n2c2_2014 source schema",
+             schema="source",
+             subset_id="n2c2_2014_deid",
+         ),
+         BigBioConfig(
+             name="n2c2_2014_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="n2c2_2014 BigBio schema",
+             schema="bigbio_kb",
+             subset_id="n2c2_2014_deid",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "n2c2_2014_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "phi": [
+                         {
+                             "id": datasets.Value("string"),
+                             "offsets": datasets.Sequence([datasets.Value("int32")]),
+                             "type": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                             "comment": datasets.Value("string"),
+                         }
+                     ],
+                 },
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         if self.config.data_dir is None:
+             raise ValueError(
+                 "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
+             )
+         else:
+             data_dir = self.config.data_dir
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "file_names": [
+                         ("2014_training-PHI-Gold-Set1.tar.gz", "track1"),
+                         ("training-PHI-Gold-Set2.tar.gz", "track1"),
+                     ],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "file_names": [
+                         ("testing-PHI-Gold-fixed.tar.gz", "track1"),
+                     ],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_dir, file_names: List[Tuple]) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if self.config.schema == "source":
+             uid = it.count(0)
+             for fname, task in file_names:
+                 full_path = os.path.join(data_dir, fname)
+                 for x in self._read_tar_gz(full_path):
+                     xml_flag = x["xml_flag"]
+                     if xml_flag:
+                         document = self._read_task1_file(
+                             file_object=x["file_object"], file_name=x["file_name"]
+                         )
+                         document["id"] = next(uid)
+                         yield document["document_id"], document
+
+         elif self.config.schema == "bigbio_kb":
+             uid = it.count(0)
+             for fname, task in file_names:
+                 full_path = os.path.join(data_dir, fname)
+                 for x in self._read_tar_gz(full_path):
+                     xml_flag = x["xml_flag"]
+                     if xml_flag:
+                         document = self._read_task1_file(
+                             file_object=x["file_object"], file_name=x["file_name"]
+                         )
+                         document["id"] = next(uid)
+                         entity_list = document.pop("phi")
+                         full_text = document.pop("text")
+                         entities_ = []
+                         for entity in entity_list:
+                             entities_.append(
+                                 {
+                                     "id": next(uid),
+                                     "type": entity["type"],
+                                     "text": entity["text"],
+                                     "offsets": entity["offsets"],
+                                     "normalized": entity["normalized"],
+                                 }
+                             )
+                         document["entities"] = entities_
+
+                         document["passages"] = [
+                             {
+                                 "id": next(uid),
+                                 "type": "full_text",
+                                 "text": [full_text],
+                                 "offsets": [[0, len(full_text)]],
+                             },
+                         ]
+
+                         # additional fields required that can be empty
+                         document["relations"] = []
+                         document["events"] = []
+                         document["coreferences"] = []
+                         yield document["document_id"], document
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
+
+     def _read_tar_gz(self, fpath: str) -> Dict:
+         """
+         Read .tar.gz file
+         """
+         # Open tar file
+         tf = tarfile.open(fpath, "r:gz")
+
+         for tf_member in tf.getmembers():
+             file_object = tf.extractfile(tf_member)
+             name = tf_member.name
+             file_name = os.path.basename(name).split(".")[0]
+             if re.search(r"\.xml", name) is not None:
+                 xml_flag = True
+             else:
+                 xml_flag = False
+             yield {
+                 "file_object": file_object,
+                 "file_name": file_name,
+                 "xml_flag": xml_flag,
+             }
+
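+     # Sketch of the per-record XML layout that _read_task1_file expects. The root
+     # element name and the concrete values below are illustrative assumptions; only
+     # the attribute names (id, start, end, text, TYPE, comment) come from the
+     # parsing code itself:
+     #
+     #     <root>
+     #       <TEXT><![CDATA[Record date: 2069-04-07 ...]]></TEXT>
+     #       <TAGS>
+     #         <DATE id="P0" start="13" end="23" text="2069-04-07" TYPE="DATE" comment=""/>
+     #       </TAGS>
+     #     </root>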
+     def _read_task1_file(self, file_object, file_name):
+         xmldoc = et.parse(file_object).getroot()
+         entities = xmldoc.findall("TAGS")[0]
+         text = xmldoc.findall("TEXT")[0].text
+         phi = []
+         for entity in entities:
+             phi.append(
+                 {
+                     "id": entity.attrib["id"],
+                     "offsets": [[int(entity.attrib["start"]), int(entity.attrib["end"])]],
+                     "type": entity.attrib["TYPE"],
+                     "text": [entity.attrib["text"]],
+                     "comment": entity.attrib["comment"],
+                     "normalized": [],
+                 }
+             )
+
+         document = {"document_id": file_name, "text": text, "phi": phi}
+         return document