qanastek committed on
Commit 5ad9707
1 Parent(s): d5c6f90

Update PxCorpus.py

Files changed (1)
  1. PxCorpus.py +124 -115
PxCorpus.py CHANGED
@@ -1,170 +1,179 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """DIAMED"""

  import os
- import json
- import math

  import datasets

- _DESCRIPTION = """\
- DIAMED
- """

- _HOMEPAGE = ""

- _LICENSE = "Apache License 2.0"

- _URL = "https://huggingface.co/datasets/Dr-BERT/DiaMED/resolve/main/data.zip"

- _CITATION = """\

  """

- class DiaMed(datasets.GeneratorBasedBuilder):
-     """DIAMED"""

-     VERSION = datasets.Version("1.0.0")

      BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name=f"default", version="1.0.0", description=f"DiaMED data"),
      ]

      DEFAULT_CONFIG_NAME = "default"

      def _info(self):
-
          features = datasets.Features(
              {
-                 "identifier": datasets.Value("string"),
-                 "title": datasets.Value("string"),
-                 "clinical_case": datasets.Value("string"),
-                 "topic": datasets.Value("string"),
-                 "keywords": datasets.Sequence(
-                     datasets.Value("string"),
                  ),
-                 "domains": datasets.Sequence(
-                     datasets.Value("string"),
                  ),
-                 "collected_at": datasets.Value("string"),
-                 "published_at": datasets.Value("string"),
-                 "source_url": datasets.Value("string"),
-                 "source_name": datasets.Value("string"),
-                 "license": datasets.Value("string"),
-                 "figures_urls": datasets.Sequence(
-                     datasets.Value("string"),
-                 ),
-                 "figures_paths": datasets.Sequence(
-                     datasets.Value("string"),
-                 ),
-                 "figures": datasets.Sequence(
-                     datasets.Image(),
-                 ),
-                 "icd-10": datasets.features.ClassLabel(names=[
-                     'A00-B99 Certain infectious and parasitic diseases',
-                     'C00-D49 Neoplasms',
-                     'D50-D89 Diseases of the blood and blood-forming organs and certain disorders involving the immune mechanism',
-                     'E00-E89 Endocrine, nutritional and metabolic diseases',
-                     'F01-F99 Mental, Behavioral and Neurodevelopmental disorders',
-                     'G00-G99 Diseases of the nervous system',
-                     'H00-H59 Diseases of the eye and adnexa',
-                     'H60-H95 Diseases of the ear and mastoid process',
-                     'I00-I99 Diseases of the circulatory system',
-                     'J00-J99 Diseases of the respiratory system',
-                     'K00-K95 Diseases of the digestive system',
-                     'L00-L99 Diseases of the skin and subcutaneous tissue',
-                     'M00-M99 Diseases of the musculoskeletal system and connective tissue',
-                     'N00-N99 Diseases of the genitourinary system',
-                     'O00-O9A Pregnancy, childbirth and the puerperium',
-                     'P00-P96 Certain conditions originating in the perinatal period',
-                     'Q00-Q99 Congenital malformations, deformations and chromosomal abnormalities',
-                     'R00-R99 Symptoms, signs and abnormal clinical and laboratory findings, not elsewhere classified',
-                     'S00-T88 Injury, poisoning and certain other consequences of external causes',
-                     'U00-U85 Codes for special purposes',
-                     'V00-Y99 External causes of morbidity',
-                     'Z00-Z99 Factors influencing health status and contact with health services',
-                 ]),
              }
          )

          return datasets.DatasetInfo(
              description=_DESCRIPTION,
              features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
              citation=_CITATION,
          )

      def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""

          data_dir = dl_manager.download_and_extract(_URL)
-         print("#"*50)
-         print(data_dir)
-         # data_dir = "./splits/"

          return [
              datasets.SplitGenerator(
                  name=datasets.Split.TRAIN,
                  gen_kwargs={
-                     "base_path": data_dir,
-                     "filepath": data_dir + "/splits/train.json",
                  },
              ),
              datasets.SplitGenerator(
                  name=datasets.Split.VALIDATION,
                  gen_kwargs={
-                     "base_path": data_dir,
-                     "filepath": data_dir + "/splits/validation.json",
                  },
              ),
              datasets.SplitGenerator(
                  name=datasets.Split.TEST,
                  gen_kwargs={
-                     "base_path": data_dir,
-                     "filepath": data_dir + "/splits/test.json",
                  },
              ),
          ]

-     def _generate_examples(self, base_path, filepath):
-
-         with open(filepath, encoding="utf-8") as f:
-
-             data = json.load(f)
-
-             for key, d in enumerate(data):
-
-                 if str(d["icd-10"]) == "nan" or d["icd-10"].find("Plusieurs cas cliniques") != -1 or d["icd-10"].find("Aucune annotation") != -1:
-                     continue
-
-                 yield key, {
-                     "identifier": d["identifier"],
-                     "title": d["title"],
-                     "clinical_case": d["clinical_case"],
-                     "topic": d["topic"],
-                     "keywords": d["keywords"],
-                     "domains": d["domain"],
-                     "collected_at": d["collected_at"],
-                     "published_at": d["published_at"],
-                     "source_url": d["source_url"],
-                     "source_name": d["source_name"],
-                     "license": d["license"],
-                     "figures_urls": d["figures"],
-                     "figures": [base_path + fg.lstrip(".") for fg in d["local_figures"]],
-                     "figures_paths": [base_path + fg.lstrip(".") for fg in d["local_figures"]],
-                     "icd-10": d["icd-10"],
-                 }
+ # pip install bs4 syntok

  import os
+ import random

  import datasets

+ import numpy as np
+ from bs4 import BeautifulSoup, ResultSet
+ from syntok.tokenizer import Tokenizer

+ tokenizer = Tokenizer()

+ _CITATION = """\
+ @InProceedings{Kocabiyikoglu2022,
+ author = "Alican Kocabiyikoglu and Fran{\c c}ois Portet and Prudence Gibert and Hervé Blanchon and Jean-Marc Babouchkine and Gaëtan Gavazzi",
+ title = "A Spoken Drug Prescription Dataset in French for Spoken Language Understanding",
+ booktitle = "13th Language Resources and Evaluation Conference (LREC 2022)",
+ year = "2022",
+ location = "Marseille, France"
+ }
+ """

+ _DESCRIPTION = """\
+ PxSLU is, to the best of our knowledge, the first spoken medical drug prescription corpus to be distributed. It contains 4 hours of transcribed
+ and annotated dialogues of drug prescriptions in French, acquired through an experiment with 55 participants, experts and non-experts in drug prescriptions.

+ The automatic transcriptions were verified by human effort and aligned with semantic labels to allow training of NLP models. The data acquisition
+ protocol was reviewed by medical experts and permits free distribution without breach of privacy and regulation.
+
+ Overview of the Corpus

+ The experiment was performed in wild conditions with naive participants and medical experts. In total, the dataset includes 1981 recordings
+ of 55 participants (38% non-experts, 25% doctors, 36% medical practitioners), manually transcribed and semantically annotated.
  """

+ _URL = "https://zenodo.org/record/6524162/files/pxslu.zip?download=1"

+ class PxCorpus(datasets.GeneratorBasedBuilder):

      BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name=f"default", version="1.0.0", description=f"PxCorpus data"),
      ]

      DEFAULT_CONFIG_NAME = "default"

      def _info(self):
+
          features = datasets.Features(
              {
+                 "id": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 "label": datasets.features.ClassLabel(
+                     names=["medical_prescription", "negate", "none", "replace"],
                  ),
+                 "tokens": datasets.Sequence(datasets.Value("string")),
+                 "ner_tags": datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=['O', 'B-A', 'B-cma_event', 'B-d_dos_form', 'B-d_dos_form_ext', 'B-d_dos_up', 'B-d_dos_val', 'B-dos_cond', 'B-dos_uf', 'B-dos_val', 'B-drug', 'B-dur_ut', 'B-dur_val', 'B-fasting', 'B-freq_days', 'B-freq_int_v1', 'B-freq_int_v1_ut', 'B-freq_int_v2', 'B-freq_int_v2_ut', 'B-freq_startday', 'B-freq_ut', 'B-freq_val', 'B-inn', 'B-max_unit_uf', 'B-max_unit_ut', 'B-max_unit_val', 'B-min_gap_ut', 'B-min_gap_val', 'B-qsp_ut', 'B-qsp_val', 'B-re_ut', 'B-re_val', 'B-rhythm_hour', 'B-rhythm_perday', 'B-rhythm_rec_ut', 'B-rhythm_rec_val', 'B-rhythm_tdte', 'B-roa', 'I-cma_event', 'I-d_dos_form', 'I-d_dos_form_ext', 'I-d_dos_up', 'I-d_dos_val', 'I-dos_cond', 'I-dos_uf', 'I-dos_val', 'I-drug', 'I-fasting', 'I-freq_startday', 'I-inn', 'I-rhythm_tdte', 'I-roa'],
+                     ),
                  ),
              }
          )

          return datasets.DatasetInfo(
              description=_DESCRIPTION,
              features=features,
              citation=_CITATION,
+             supervised_keys=None,
          )

      def _split_generators(self, dl_manager):

          data_dir = dl_manager.download_and_extract(_URL)

+         print(data_dir)
+
          return [
              datasets.SplitGenerator(
                  name=datasets.Split.TRAIN,
                  gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "train",
                  },
              ),
              datasets.SplitGenerator(
                  name=datasets.Split.VALIDATION,
                  gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "validation",
                  },
              ),
              datasets.SplitGenerator(
                  name=datasets.Split.TEST,
                  gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "test",
                  },
              ),
          ]

+     def getTokenTags(self, document):
+
+         tokens = []
+         ner_tags = []
+
+         for pair in document.split("\n"):
+
+             if len(pair) <= 0:
+                 continue
+
+             text, label = pair.split("\t")
+             tokens.append(text)
+             ner_tags.append(label)
+
+         return tokens, ner_tags
+
+     def _generate_examples(self, filepath_1, filepath_2, filepath_3, split):
+
+         key = 0
+         all_res = []
+
+         f_seq_in = open(filepath_1, "r")
+         seq_in = f_seq_in.read().split("\n")
+         f_seq_in.close()
+
+         f_seq_label = open(filepath_2, "r")
+         seq_label = f_seq_label.read().split("\n")
+         f_seq_label.close()
+
+         f_in_ner = open(filepath_3, "r")
+         docs = f_in_ner.read().split("\n\n")
+         f_in_ner.close()
+
+         for idx, doc in enumerate(docs):
+
+             text = seq_in[idx]
+             label = seq_label[idx]
+
+             tokens, ner_tags = self.getTokenTags(docs[idx])
+
+             if len(text) <= 0 or len(label) <= 0:
+                 continue
+
+             all_res.append({
+                 "id": key,
+                 "text": text,
+                 "label": label,
+                 "tokens": tokens,
+                 "ner_tags": ner_tags,
+             })
+
+             key += 1
+
+         ids = [r["id"] for r in all_res]
+
+         random.seed(4)
+         random.shuffle(ids)
+         random.shuffle(ids)
+         random.shuffle(ids)
+
+         train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
+
+         if split == "train":
+             allowed_ids = list(train)
+         elif split == "validation":
+             allowed_ids = list(validation)
+         elif split == "test":
+             allowed_ids = list(test)
+
+         for r in all_res:
+             if r["id"] in allowed_ids:
+                 yield r["id"], r
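
For reference, here is a minimal sketch of the 70/10/20 split that the new _generate_examples computes: the collected example ids are shuffled with a fixed seed, then cut at the 70% and 80% marks with np.split, so roughly 70% of the recordings go to train, 10% to validation, and 20% to test. The id count below is a toy stand-in, not taken from the corpus.

import random

import numpy as np

# Toy stand-in for the ids gathered in _generate_examples
# (the real script collects one id per retained recording).
ids = list(range(100))

random.seed(4)
random.shuffle(ids)

# Cut points at 70% and 80% of the shuffled list: the three
# pieces are ~70% train, ~10% validation, ~20% test.
train, validation, test = np.split(ids, [int(len(ids) * 0.70), int(len(ids) * 0.80)])

print(len(train), len(validation), len(test))  # -> 70 10 20

Because the generator is seeded before shuffling, the assignment of ids to splits is deterministic across runs. Note also that getTokenTags expects PxSLU_conll.txt to hold one token and one tag per line, separated by a tab, with blank lines between documents; a line such as "doliprane<TAB>B-drug" (hypothetical content) would yield the token "doliprane" tagged B-drug.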