qanastek committed on
Commit 28eb071 · 1 Parent(s): 75efe75

Upload 2 files

Files changed (2)
  1. PxCorpus.py +181 -0
  2. test_dataset.py +11 -0
PxCorpus.py ADDED
@@ -0,0 +1,181 @@
+ # pip install bs4 syntok
+
+ import os
+ import random
+
+ import datasets
+
+ import numpy as np
+ from bs4 import BeautifulSoup, ResultSet
+ from syntok.tokenizer import Tokenizer
+
+ tokenizer = Tokenizer()
+
+ _CITATION = """\
+ @InProceedings{Kocabiyikoglu2022,
+ author = "Alican Kocabiyikoglu and Fran{\c c}ois Portet and Prudence Gibert and Hervé Blanchon and Jean-Marc Babouchkine and Gaëtan Gavazzi",
+ title = "A Spoken Drug Prescription Dataset in French for Spoken Language Understanding",
+ booktitle = "13th Language Resources and Evaluation Conference (LREC 2022)",
+ year = "2022",
+ location = "Marseille, France"
+ }
+ """
+
+ _DESCRIPTION = """\
+ PxSLU is, to the best of our knowledge, the first spoken medical drug prescription corpus to be distributed. It contains 4 hours of transcribed
+ and annotated dialogues of drug prescriptions in French, acquired through an experiment with 55 participants, both experts and non-experts in drug prescriptions.
+
+ The automatic transcriptions were verified by human effort and aligned with semantic labels to allow training of NLP models. The data acquisition
+ protocol was reviewed by medical experts and permits free distribution without breach of privacy and regulation.
+
+ Overview of the Corpus
+
+ The experiment was performed in wild conditions with naive participants and medical experts. In total, the dataset includes 1981 recordings
+ of 55 participants (38% non-experts, 25% doctors, 36% medical practitioners), manually transcribed and semantically annotated.
+ """
+
+ _URL = "https://zenodo.org/record/6524162/files/pxslu.zip?download=1"
+
+ class PxCorpus(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="default", version="1.0.0", description="PxCorpus data"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 "label": datasets.Value("string"),
+                 # "label": datasets.features.ClassLabel(
+                 #     names=["medical_prescription", "negate", "none", "replace"],
+                 # ),
+                 "tokens": datasets.Sequence(datasets.Value("string")),
+                 "ner_tags": datasets.Sequence(
+                     datasets.Value("string"),
+                     # datasets.features.ClassLabel(
+                     #     names=["O", "I-RML"],
+                     # ),
+                 ),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             citation=_CITATION,
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         data_dir = dl_manager.download_and_extract(_URL)
+
+         print(data_dir)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath_1": os.path.join(data_dir, "seq.in"),
+                     "filepath_2": os.path.join(data_dir, "seq.label"),
+                     "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def getTokenTags(self, document):
+
+         tokens = []
+         ner_tags = []
+
+         for pair in document.split("\n"):
+
+             if len(pair) <= 0:
+                 continue
+
+             text, label = pair.split("\t")
+             tokens.append(text)
+             ner_tags.append(label)
+
+         return tokens, ner_tags
+
+     def _generate_examples(self, filepath_1, filepath_2, filepath_3, split):
+
+         key = 0
+         all_res = []
+
+         f_seq_in = open(filepath_1, "r")
+         seq_in = f_seq_in.read().split("\n")
+         f_seq_in.close()
+
+         f_seq_label = open(filepath_2, "r")
+         seq_label = f_seq_label.read().split("\n")
+         f_seq_label.close()
+
+         f_in_ner = open(filepath_3, "r")
+         docs = f_in_ner.read().split("\n\n")
+         f_in_ner.close()
+
+         for idx, doc in enumerate(docs):
+
+             text = seq_in[idx]
+             label = seq_label[idx]
+
+             tokens, ner_tags = self.getTokenTags(doc)
+
+             if len(text) <= 0 or len(label) <= 0:
+                 continue
+
+             all_res.append({
+                 "id": key,
+                 "text": text,
+                 "label": label,
+                 "tokens": tokens,
+                 "ner_tags": ner_tags,
+             })
+
+             key += 1
+
+         ids = [r["id"] for r in all_res]
+
+         random.seed(4)
+         random.shuffle(ids)
+         random.shuffle(ids)
+         random.shuffle(ids)
+
+         train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
+
+         if split == "train":
+             allowed_ids = list(train)
+         elif split == "validation":
+             allowed_ids = list(validation)
+         elif split == "test":
+             allowed_ids = list(test)
+
+         for r in all_res:
+             if r["id"] in allowed_ids:
+                 yield r["id"], r
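
For reference, here is a minimal sketch of how the deterministic 70/10/20 split in _generate_examples behaves. The seed, triple shuffle, and cut points are taken from the loader above; the example count of 1981 comes from the dataset description, and the real counts may differ slightly since the loader skips empty lines.

# Sketch of the split logic used by _generate_examples (assumptions noted above).
import random
import numpy as np

ids = list(range(1981))   # assumed count; the loader builds ids from non-empty examples
random.seed(4)
for _ in range(3):        # the loader shuffles three times with the same seeded RNG
    random.shuffle(ids)

# Cut the shuffled ids at the 70% and 80% marks: ~70% train, ~10% validation, ~20% test.
train, validation, test = np.split(ids, [int(len(ids) * 0.70), int(len(ids) * 0.80)])
print(len(train), len(validation), len(test))  # 1386 198 397 under these assumptions
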
test_dataset.py ADDED
@@ -0,0 +1,11 @@
+ import json
+
+ from datasets import load_dataset
+
+ dataset = load_dataset("./PxCorpus.py")
+ print(dataset)
+ # for d in dataset["train"]:
+ #     if d["label"] != "medical_prescription":
+ #         print(d)
+ # print(dataset["train"][168])
+ # print(json.dumps(dataset["train"][0], sort_keys=True, indent=4))
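
Because the loader keeps "label" as a plain string (the ClassLabel feature is commented out in PxCorpus.py), users who need integer class ids can encode the column after loading. A sketch, assuming a recent version of the datasets library that provides Dataset.class_encode_column; the printed label names are only illustrative:

# Encode the string "label" column into a ClassLabel after loading (sketch).
from datasets import load_dataset

dataset = load_dataset("./PxCorpus.py")
train = dataset["train"].class_encode_column("label")
print(train.features["label"].names)  # e.g. ["medical_prescription", "negate", "none", "replace"]
print(train[0]["label"])              # now an integer class id instead of a string
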