Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: expert-generated, found
Annotations Creators: found
Source Datasets: original
Commit a3cbb0d by albertvillanova (1 parent: 30c94e3)

Enable dataset viewer by hosting data files (#4)


- Host data files (db080b6550b421fbaa60106cbe2f4fa0de7e9636)
- Update loading script (bce62c259f4a7260574d8d2b831a2b239a3098f4)
- Add paperswithcode id (c788cd703309b3ea548e7978820858c6df3056ea)
- Delete legacy dataset_infos.json (a4766250dae8cf83c41ad4ca2c35578fd30288b9)
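With the zips now hosted under data/ in this repo, the dataset should load without the manual Google Drive step the old script enforced. A minimal usage sketch (config names taken from the updated _URLS mapping in medical_dialog.py below; not verified here):

    from datasets import load_dataset

    # The processed configs ship pre-split JSON, so train/validation/test
    # all load directly from the hosted archives.
    dataset = load_dataset("medical_dialog", "processed.en")
    print(dataset["train"][0])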

README.md CHANGED
@@ -20,6 +20,7 @@ task_categories:
 task_ids:
 - closed-domain-qa
 pretty_name: MedDialog
+paperswithcode_id: meddialog
 dataset_info:
 - config_name: en
   features:
data/Medical-Dialogue-Dataset-Chinese.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d7e5b8ab5c09ba2fd015b4363461d6a026ba994ef799666c5bc6e367438bb4d
+size 2406679418
data/Medical-Dialogue-Dataset-English.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:899635d6be9489602f432ea70d24be6a3c1ef1d6ccd22564f33946ee60f20f8c
+size 93916317
data/processed-chinese.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d368320555045b773d24b4b3bf295c3c6b62a3b46d537d25484f3948c00cffe
+size 809796157
data/processed-english.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12cd3161693c7e5e15d9875ea902ca7d7471db4f0eb4ffd77a662c3dc51517be
+size 139172
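Each of the four ADDED files above is a Git LFS pointer: the repository itself stores only the spec version, the sha256 oid, and the byte size, while the actual zip lives in LFS storage. If a zip is fetched out-of-band, the oid doubles as a checksum; a small verification sketch (the local path is hypothetical):

    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        """Stream the file so the multi-GB zips need not fit in memory."""
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # Expected oid from the data/processed-english.zip pointer above.
    assert sha256_of("data/processed-english.zip") == (
        "12cd3161693c7e5e15d9875ea902ca7d7471db4f0eb4ffd77a662c3dc51517be"
    )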
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"en": {"description": "The MedDialog dataset (English) contains conversations (in English) between doctors and patients.It has 0.26 million dialogues. The data is continuously growing and more dialogues will be added. The raw dialogues are from healthcaremagic.com and icliniq.com.\nAll copyrights of the data belong to healthcaremagic.com and icliniq.com.\n", "citation": "@article{chen2020meddiag,\n title={MedDialog: a large-scale medical dialogue dataset},\n author={Chen, Shu and Ju, Zeqian and Dong, Xiangyu and Fang, Hongchao and Wang, Sicheng and Yang, Yue and Zeng, Jiaqi and Zhang, Ruisi and Zhang, Ruoyu and Zhou, Meng and Zhu, Penghui and Xie, Pengtao},\n journal={arXiv preprint arXiv:2004.03329},\n year={2020}\n}\n", "homepage": "https://github.com/UCSD-AI4H/Medical-Dialogue-System", "license": "", "features": {"file_name": {"dtype": "string", "id": null, "_type": "Value"}, "dialogue_id": {"dtype": "int32", "id": null, "_type": "Value"}, "dialogue_url": {"dtype": "string", "id": null, "_type": "Value"}, "dialogue_turns": {"feature": {"speaker": {"num_classes": 2, "names": ["Patient", "Doctor"], "id": null, "_type": "ClassLabel"}, "utterance": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "medical_dialog", "config_name": "en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 290274759, "num_examples": 229674, "dataset_name": "medical_dialog"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 290274759, "size_in_bytes": 290274759}, "zh": {"description": "The MedDialog dataset (English) contains conversations (in English) between doctors and patients.It has 0.26 million dialogues. The data is continuously growing and more dialogues will be added. 
The raw dialogues are from healthcaremagic.com and icliniq.com.\nAll copyrights of the data belong to healthcaremagic.com and icliniq.com.\n", "citation": "@article{chen2020meddiag,\n title={MedDialog: a large-scale medical dialogue dataset},\n author={Chen, Shu and Ju, Zeqian and Dong, Xiangyu and Fang, Hongchao and Wang, Sicheng and Yang, Yue and Zeng, Jiaqi and Zhang, Ruisi and Zhang, Ruoyu and Zhou, Meng and Zhu, Penghui and Xie, Pengtao},\n journal={arXiv preprint arXiv:2004.03329},\n year={2020}\n}\n", "homepage": "https://github.com/UCSD-AI4H/Medical-Dialogue-System", "license": "", "features": {"file_name": {"dtype": "string", "id": null, "_type": "Value"}, "dialogue_id": {"dtype": "int32", "id": null, "_type": "Value"}, "dialogue_url": {"dtype": "string", "id": null, "_type": "Value"}, "dialogue_turns": {"feature": {"speaker": {"num_classes": 2, "names": ["\u75c5\u4eba", "\u533b\u751f"], "id": null, "_type": "ClassLabel"}, "utterance": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "medical_dialog", "config_name": "zh", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1092063621, "num_examples": 1921127, "dataset_name": "medical_dialog"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1092063621, "size_in_bytes": 1092063621}, "processed.en": {"description": "The MedDialog dataset (English) contains conversations (in English) between doctors and patients.It has 0.26 million dialogues. The data is continuously growing and more dialogues will be added. The raw dialogues are from healthcaremagic.com and icliniq.com.\nAll copyrights of the data belong to healthcaremagic.com and icliniq.com.\n", "citation": "@article{chen2020meddiag,\n title={MedDialog: a large-scale medical dialogue dataset},\n author={Chen, Shu and Ju, Zeqian and Dong, Xiangyu and Fang, Hongchao and Wang, Sicheng and Yang, Yue and Zeng, Jiaqi and Zhang, Ruisi and Zhang, Ruoyu and Zhou, Meng and Zhu, Penghui and Xie, Pengtao},\n journal={arXiv preprint arXiv:2004.03329},\n year={2020}\n}\n", "homepage": "https://github.com/UCSD-AI4H/Medical-Dialogue-System", "license": "Copyright", "features": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "utterances": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "medical_dialog", "config_name": "processed.en", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 370745, "num_examples": 482, "dataset_name": "medical_dialog"}, "validation": {"name": "validation", "num_bytes": 52145, "num_examples": 60, "dataset_name": "medical_dialog"}, "test": {"name": "test", "num_bytes": 46514, "num_examples": 61, "dataset_name": "medical_dialog"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1ria4E6IdTIPsikL4Glm3uy1tFKJKw0W8": {"num_bytes": 414490, "checksum": "568a9c6c670502eec3319c78e9d12c0aebb883c0d1e45095b5dd5f99d8b6b874"}, "https://drive.google.com/uc?export=download&id=1KAZneuwdfEVQQM6euCX4pMDP-9DQpiB5": {"num_bytes": 57706, "checksum": "a5cd29f17fcfedf01af41410e12e47474ba1176f376136e18fc0446b7e2f52b2"}, 
"https://drive.google.com/uc?export=download&id=10izqL71kcgnteYsf87Vh6j_mZ8sZM2Rc": {"num_bytes": 52018, "checksum": "316e5b3eb03ec7210b0d84414df0e84a42b396205d72a2b5fdba533fd19a5ebd"}}, "download_size": 524214, "post_processing_size": null, "dataset_size": 469404, "size_in_bytes": 993618}, "processed.zh": {"description": "The MedDialog dataset (English) contains conversations (in English) between doctors and patients.It has 0.26 million dialogues. The data is continuously growing and more dialogues will be added. The raw dialogues are from healthcaremagic.com and icliniq.com.\nAll copyrights of the data belong to healthcaremagic.com and icliniq.com.\n", "citation": "@article{chen2020meddiag,\n title={MedDialog: a large-scale medical dialogue dataset},\n author={Chen, Shu and Ju, Zeqian and Dong, Xiangyu and Fang, Hongchao and Wang, Sicheng and Yang, Yue and Zeng, Jiaqi and Zhang, Ruisi and Zhang, Ruoyu and Zhou, Meng and Zhu, Penghui and Xie, Pengtao},\n journal={arXiv preprint arXiv:2004.03329},\n year={2020}\n}\n", "homepage": "https://github.com/UCSD-AI4H/Medical-Dialogue-System", "license": "Copyright", "features": {"utterances": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "medical_dialog", "config_name": "processed.zh", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1571262099, "num_examples": 2725989, "dataset_name": "medical_dialog"}, "validation": {"name": "validation", "num_bytes": 197117565, "num_examples": 340748, "dataset_name": "medical_dialog"}, "test": {"name": "test", "num_bytes": 196526738, "num_examples": 340754, "dataset_name": "medical_dialog"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1AaDJoHaiHAwEZwtskRH8oL1UP4FRgmgx": {"num_bytes": 1665206303, "checksum": "fd34385487755d95783cf834921bff14ceb74d9a244962577140c9e291dce4e9"}, "https://drive.google.com/uc?export=download&id=1TvfZCmQqP1kURIfEinOcj5VOPelTuGwI": {"num_bytes": 208871784, "checksum": "ed6b04ff4d62a4fa5b5b85327d692302b3369c0d28e9da887c12ec78ea778ce4"}, "https://drive.google.com/uc?export=download&id=1pmmG95Yl6mMXRXDDSRb9-bYTxOE7ank5": {"num_bytes": 208276068, "checksum": "b1118b614f866089a1daf18107a72dd5ba77c50a1e9ca145491ddcef89d797b7"}}, "download_size": 2082354155, "post_processing_size": null, "dataset_size": 1964906402, "size_in_bytes": 4047260557}}
 
 
medical_dialog.py CHANGED
@@ -46,15 +46,21 @@ _LICENSE = "Unknown"
 
 # URLS of processed data
 _URLS = {
-    "en": {
-        "train": "https://drive.google.com/uc?export=download&id=1ria4E6IdTIPsikL4Glm3uy1tFKJKw0W8",
-        "validation": "https://drive.google.com/uc?export=download&id=1KAZneuwdfEVQQM6euCX4pMDP-9DQpiB5",
-        "test": "https://drive.google.com/uc?export=download&id=10izqL71kcgnteYsf87Vh6j_mZ8sZM2Rc",
+    "en": "data/Medical-Dialogue-Dataset-English.zip",
+    "zh": "data/Medical-Dialogue-Dataset-Chinese.zip",
+    "processed.en": "data/processed-english.zip",
+    "processed.zh": "data/processed-chinese.zip",
+}
+_FILENAMES = {
+    "processed.en": {
+        "train": "english-train.json",
+        "validation": "english-dev.json",
+        "test": "english-test.json",
     },
-    "zh": {
-        "train": "https://drive.google.com/uc?export=download&id=1AaDJoHaiHAwEZwtskRH8oL1UP4FRgmgx",
-        "validation": "https://drive.google.com/uc?export=download&id=1TvfZCmQqP1kURIfEinOcj5VOPelTuGwI",
-        "test": "https://drive.google.com/uc?export=download&id=1pmmG95Yl6mMXRXDDSRb9-bYTxOE7ank5",
+    "processed.zh": {
+        "train": "train_data.json",
+        "validation": "validate_data.json",
+        "test": "test_data.json",
     },
 }
 
@@ -77,33 +83,6 @@ class MedicalDialog(datasets.GeneratorBasedBuilder):
         ),
     ]
 
-    @property
-    def manual_download_instructions(self):
-        *processed, _ = self.config.name.split(".")
-        return (
-            None
-            if processed
-            else """\
-            \n For English:\nYou need to go to https://drive.google.com/drive/folders/1g29ssimdZ6JzTST6Y8g6h-ogUNReBtJD?usp=sharing,\
-            and manually download the dataset from Google Drive. Once it is completed,
-            a file named Medical-Dialogue-Dataset-English-<timestamp-info>.zip will appear in your Downloads folder(
-            or whichever folder your browser chooses to save files to). Unzip the folder to obtain
-            a folder named "Medical-Dialogue-Dataset-English" several text files.
-
-            Now, you can specify the path to this folder for the data_dir argument in the
-            datasets.load_dataset(...) option.
-            The <path/to/folder> can e.g. be "/Downloads/Medical-Dialogue-Dataset-English".
-            The data can then be loaded using the below command:\
-            `datasets.load_dataset("medical_dialog", name="en", data_dir="/Downloads/Medical-Dialogue-Dataset-English")`.
-
-            \n For Chinese:\nFollow the above process. Change the 'name' to 'zh'.The download link is https://drive.google.com/drive/folders/1r09_i8nJ9c1nliXVGXwSqRYqklcHd9e2
-
-            **NOTE**
-            - A caution while downloading from drive. It is better to download single files since creating a zip might not include files <500 MB. This has been observed mutiple times.
-            - After downloading the files and adding them to the appropriate folder, the path of the folder can be given as input tu the data_dir path.
-            """
-        )
-
     def _info(self):
         if self.config.name == "zh":
             features = datasets.Features(
@@ -158,23 +137,13 @@ class MedicalDialog(datasets.GeneratorBasedBuilder):
         """Returns SplitGenerators."""
         *processed, lang = self.config.name.split(".")
         if processed:
-            data_dir = dl_manager.download(_URLS[lang])
+            # data_dir = dl_manager.download(_URLS[lang])
+            data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
             splits = [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
-            return [datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": data_dir[split]}) for split in splits]
+            return [datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": os.path.join(data_dir, _FILENAMES[self.config.name][split])}) for split in splits]
         else:
-            path_to_manual_file = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
-            if not os.path.exists(path_to_manual_file):
-                raise FileNotFoundError(
-                    f"{path_to_manual_file} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('medical_dialog', data_dir=...)`. Manual download instructions: {self.manual_download_instructions})"
-                )
-
-            filepaths = [
-                os.path.join(path_to_manual_file, txt_file_name)
-                for txt_file_name in sorted(os.listdir(path_to_manual_file))
-                if txt_file_name.endswith("txt")
-            ]
-
-            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths})]
+            archive = dl_manager.download(_URLS[self.config.name])
+            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": dl_manager.iter_archive(archive)})]
 
     def _generate_examples(self, filepaths):
         """Yields examples. Iterates over each file and give the creates the corresponding features.
@@ -205,130 +174,130 @@ class MedicalDialog(datasets.GeneratorBasedBuilder):
             array = ""
         else:
             id_ = -1
-        for filepath in filepaths:
-            with open(filepath, encoding="utf-8") as f_in:
-                # Parameters to just "sectionize" the raw data
-                last_part = ""
-                last_dialog = {}
-                last_list = []
-                last_user = ""
-                check_list = []
-
-                # These flags are present to have a single function address both chinese and english data
-                # English data is a little hahazard (i.e. the sentences spans multiple different lines),
-                # Chinese is compact with one line for doctor and patient.
-                conv_flag = False
-                des_flag = False
-
-                while True:
-                    line = f_in.readline()
-                    if not line:
-                        break
-
-                    # Extracting the dialog id
-                    if line[:2] == "id":  # Hardcode alert!
-                        # Handling ID references that may come in the description
-                        # These were observed in the Chinese dataset and were not
-                        # followed by numbers
-                        try:
-                            dialogue_id = int(re.findall(r"\d+", line)[0])
-                        except IndexError:
-                            continue
-
-                    # Extracting the url
-                    if line[:4] == "http":  # Hardcode alert!
-                        dialogue_url = line.rstrip()
-
-                    # Extracting the patient info from description.
-                    if line[:11] == "Description":  # Hardcode alert!
-                        last_part = "description"
-                        last_dialog = {}
-                        last_list = []
-                        last_user = ""
-                        last_conv = {"speaker": "", "utterance": ""}
-                        while True:
-                            line = f_in.readline()
-                            if (not line) or (line in ["\n", "\n\r"]):
-                                break
-                            else:
-                                if data_lang == "zh":  # Condition in chinese
-                                    if line[:5] == "病情描述:":  # Hardcode alert!
-                                        last_user = "病人"
-                                        sen = f_in.readline().rstrip()
-                                        des_flag = True
-
-                                if data_lang == "en":
-                                    last_user = "Patient"
-                                    sen = line.rstrip()
-                                    des_flag = True
-
-                                if des_flag:
-                                    if sen == "":
-                                        continue
-                                    if sen in check_list:
-                                        last_conv["speaker"] = ""
-                                        last_conv["utterance"] = ""
-                                    else:
-                                        last_conv["speaker"] = last_user
-                                        last_conv["utterance"] = sen
-                                        check_list.append(sen)
-                                    des_flag = False
-                                    break
-                    # Extracting the conversation info from dialogue.
-                    elif line[:8] == "Dialogue":  # Hardcode alert!
-                        if last_part == "description" and len(last_conv["utterance"]) > 0:
-                            last_part = "dialogue"
-                            if data_lang == "zh":
-                                last_user = "病人"
-
-                            if data_lang == "en":
-                                last_user = "Patient"
-
-                            while True:
-                                line = f_in.readline()
-                                if (not line) or (line in ["\n", "\n\r"]):
-                                    conv_flag = False
-                                    last_user = ""
-                                    last_list.append(copy.deepcopy(last_conv))
-                                    # To ensure close of conversation, only even number of sentences
-                                    # are extracted
-                                    last_turn = len(last_list)
-                                    if int(last_turn / 2) > 0:
-                                        temp = int(last_turn / 2)
-                                        id_ += 1
-                                        last_dialog["file_name"] = filepath
-                                        last_dialog["dialogue_id"] = dialogue_id
-                                        last_dialog["dialogue_url"] = dialogue_url
-                                        last_dialog["dialogue_turns"] = last_list[: temp * 2]
-                                        yield id_, last_dialog
-                                    break
-
-                                if data_lang == "zh":
-                                    if line[:3] == "病人:" or line[:3] == "医生:":  # Hardcode alert!
-                                        user = line[:2]  # Hardcode alert!
-                                        line = f_in.readline()
-                                        conv_flag = True
-
-                                # The elif block is to ensure that multi-line sentences are captured.
-                                # This has been observed only in english.
-                                if data_lang == "en":
-                                    if line.strip() == "Patient:" or line.strip() == "Doctor:":  # Hardcode alert!
-                                        user = line.replace(":", "").rstrip()
-                                        line = f_in.readline()
-                                        conv_flag = True
-                                    elif line[:2] != "id":  # Hardcode alert!
-                                        conv_flag = True
-
-                                # Continues till the next ID is parsed
-                                if conv_flag:
-                                    sen = line.rstrip()
-                                    if sen == "":
-                                        continue
-
-                                    if user == last_user:
-                                        last_conv["utterance"] = last_conv["utterance"] + sen
-                                    else:
-                                        last_user = user
-                                        last_list.append(copy.deepcopy(last_conv))
-                                        last_conv["utterance"] = sen
-                                        last_conv["speaker"] = user
+        for filepath, f_in in filepaths:
+            # with open(filepath, encoding="utf-8") as f_in:
+            # Parameters to just "sectionize" the raw data
+            last_part = ""
+            last_dialog = {}
+            last_list = []
+            last_user = ""
+            check_list = []
+
+            # These flags are present to have a single function address both chinese and english data
+            # English data is a little hahazard (i.e. the sentences spans multiple different lines),
+            # Chinese is compact with one line for doctor and patient.
+            conv_flag = False
+            des_flag = False
+
+            while True:
+                line = f_in.readline().decode("utf-8")
+                if not line:
+                    break
+
+                # Extracting the dialog id
+                if line[:2] == "id":  # Hardcode alert!
+                    # Handling ID references that may come in the description
+                    # These were observed in the Chinese dataset and were not
+                    # followed by numbers
+                    try:
+                        dialogue_id = int(re.findall(r"\d+", line)[0])
+                    except IndexError:
+                        continue
+
+                # Extracting the url
+                if line[:4] == "http":  # Hardcode alert!
+                    dialogue_url = line.rstrip()
+
+                # Extracting the patient info from description.
+                if line[:11] == "Description":  # Hardcode alert!
+                    last_part = "description"
+                    last_dialog = {}
+                    last_list = []
+                    last_user = ""
+                    last_conv = {"speaker": "", "utterance": ""}
+                    while True:
+                        line = f_in.readline().decode("utf-8")
+                        if (not line) or (line in ["\n", "\n\r"]):
+                            break
+                        else:
+                            if data_lang == "zh":  # Condition in chinese
+                                if line[:5] == "病情描述:":  # Hardcode alert!
+                                    last_user = "病人"
+                                    sen = f_in.readline().decode("utf-8").rstrip()
+                                    des_flag = True
+
+                            if data_lang == "en":
+                                last_user = "Patient"
+                                sen = line.rstrip()
+                                des_flag = True
+
+                            if des_flag:
+                                if sen == "":
+                                    continue
+                                if sen in check_list:
+                                    last_conv["speaker"] = ""
+                                    last_conv["utterance"] = ""
+                                else:
+                                    last_conv["speaker"] = last_user
+                                    last_conv["utterance"] = sen
+                                    check_list.append(sen)
+                                des_flag = False
+                                break
+                # Extracting the conversation info from dialogue.
+                elif line[:8] == "Dialogue":  # Hardcode alert!
+                    if last_part == "description" and len(last_conv["utterance"]) > 0:
+                        last_part = "dialogue"
+                        if data_lang == "zh":
+                            last_user = "病人"
+
+                        if data_lang == "en":
+                            last_user = "Patient"
+
+                        while True:
+                            line = f_in.readline().decode("utf-8")
+                            if (not line) or (line in ["\n", "\n\r"]):
+                                conv_flag = False
+                                last_user = ""
+                                last_list.append(copy.deepcopy(last_conv))
+                                # To ensure close of conversation, only even number of sentences
+                                # are extracted
+                                last_turn = len(last_list)
+                                if int(last_turn / 2) > 0:
+                                    temp = int(last_turn / 2)
+                                    id_ += 1
+                                    last_dialog["file_name"] = filepath
+                                    last_dialog["dialogue_id"] = dialogue_id
+                                    last_dialog["dialogue_url"] = dialogue_url
+                                    last_dialog["dialogue_turns"] = last_list[: temp * 2]
+                                    yield id_, last_dialog
+                                break
+
+                            if data_lang == "zh":
+                                if line[:3] == "病人:" or line[:3] == "医生:":  # Hardcode alert!
+                                    user = line[:2]  # Hardcode alert!
+                                    line = f_in.readline().decode("utf-8")
+                                    conv_flag = True
+
+                            # The elif block is to ensure that multi-line sentences are captured.
+                            # This has been observed only in english.
+                            if data_lang == "en":
+                                if line.strip() == "Patient:" or line.strip() == "Doctor:":  # Hardcode alert!
+                                    user = line.replace(":", "").rstrip()
+                                    line = f_in.readline().decode("utf-8")
+                                    conv_flag = True
+                                elif line[:2] != "id":  # Hardcode alert!
+                                    conv_flag = True
+
+                            # Continues till the next ID is parsed
+                            if conv_flag:
+                                sen = line.rstrip()
+                                if sen == "":
+                                    continue
+
+                                if user == last_user:
+                                    last_conv["utterance"] = last_conv["utterance"] + sen
+                                else:
+                                    last_user = user
+                                    last_list.append(copy.deepcopy(last_conv))
+                                    last_conv["utterance"] = sen
+                                    last_conv["speaker"] = user