thbndi committed on
Commit
3bbae21
1 Parent(s): 00c48b0

Update dataset_utils.py

Files changed (1)
  1. dataset_utils.py +366 -553
dataset_utils.py CHANGED
@@ -1,580 +1,393 @@
1
- import os
2
  import pandas as pd
3
- import datasets
4
- import sys
5
  import pickle
6
- import subprocess
7
- import shutil
8
- from urllib.request import urlretrieve
9
- from sklearn.model_selection import train_test_split
10
- from sklearn.preprocessing import LabelEncoder
11
- import yaml
12
  import numpy as np
13
- from .dataset_utils import vocab, concat_data, generate_deep, generate_ml, generate_text, open_dict
14
- from .task_cohort import create_cohort
15
-
16
-
17
-
18
- _DESCRIPTION = """\
19
- Dataset for mimic4 data, by default for the Mortality task.
20
- Available tasks are: Mortality, Length of Stay, Readmission, Phenotype.
21
- The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
22
- mimic path should have this form : "path/to/mimic4data/from/username/mimiciv/2.2"
23
- If you choose a Custom task provide a configuration file for the Time series.
24
- Currently working with Mimic-IV version 1 and 2
25
- """
26
- _BASE_URL = "https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main"
27
- _HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
28
-
29
- _CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
30
- _GIT_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline"
31
-
32
- _ICD_CODE = f"{_BASE_URL}/icd10.txt"
33
- _DATA_GEN = f"{_BASE_URL}/data_generation_icu_modify.py"
34
- _DATA_GEN_HOSP= f"{_BASE_URL}/data_generation_modify.py"
35
- _DAY_INT= f"{_BASE_URL}/day_intervals_cohort_v22.py"
36
- _CONFIG_URLS = {'los' : f"{_BASE_URL}/config/los.config",
37
- 'mortality' : f"{_BASE_URL}/config/mortality.config",
38
- 'phenotype' : f"{_BASE_URL}/config/phenotype.config",
39
- 'readmission' : f"{_BASE_URL}/config/readmission.config"
40
- }
41
-
42
-
43
- class Mimic4DatasetConfig(datasets.BuilderConfig):
44
- """BuilderConfig for Mimic4Dataset."""
45
-
46
- def __init__(
47
- self,
48
- **kwargs,
49
- ):
50
- super().__init__(**kwargs)
51
-
52
-
53
- class Mimic4Dataset(datasets.GeneratorBasedBuilder):
54
- """Create Mimic4Dataset dataset from Mimic-IV data stored in user machine."""
55
- VERSION = datasets.Version("1.0.0")
56
-
57
- def __init__(self, **kwargs):
58
- self.mimic_path = kwargs.pop("mimic_path", None)
59
- self.encoding = kwargs.pop("encoding",'concat')
60
- self.config_path = kwargs.pop("config_path",None)
61
- self.test_size = kwargs.pop("test_size",0.2)
62
- self.val_size = kwargs.pop("val_size",0.1)
63
- self.generate_cohort = kwargs.pop("generate_cohort",True)
64
-
65
- if self.encoding == 'concat':
66
- self.concat = True
67
- else:
68
- self.concat = False
69
-
70
- super().__init__(**kwargs)
71
 
 
 
 
 
 
 
 
72
 
73
-     BUILDER_CONFIGS = [
-         Mimic4DatasetConfig(
-             name="Phenotype",
-             version=VERSION,
-             description="Dataset for mimic4 Phenotype task"
-         ),
-         Mimic4DatasetConfig(
-             name="Readmission",
-             version=VERSION,
-             description="Dataset for mimic4 Readmission task"
-         ),
-         Mimic4DatasetConfig(
-             name="Length of Stay",
-             version=VERSION,
-             description="Dataset for mimic4 Length of Stay task"
-         ),
-         Mimic4DatasetConfig(
-             name="Mortality",
-             version=VERSION,
-             description="Dataset for mimic4 Mortality task"
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "Mortality"
-
-     def init_cohort(self):
-         if self.config_path==None:
-             if self.config.name == 'Phenotype' : self.config_path = _CONFIG_URLS['phenotype']
-             if self.config.name == 'Readmission' : self.config_path = _CONFIG_URLS['readmission']
-             if self.config.name == 'Length of Stay' : self.config_path = _CONFIG_URLS['los']
-             if self.config.name == 'Mortality' : self.config_path = _CONFIG_URLS['mortality']
-         version = self.mimic_path.split('/')[-1]
-         mimic_folder= self.mimic_path.split('/')[-2]
-         mimic_complete_path='/'+mimic_folder+'/'+version
-
-         current_directory = os.getcwd()
-         if os.path.exists(os.path.dirname(current_directory)+'/MIMIC-IV-Data-Pipeline-main'):
-             dir =os.path.dirname(current_directory)
-             os.chdir(dir)
          else:
-             #move to parent directory of mimic data
-             dir = self.mimic_path.replace(mimic_complete_path,'')
-             print('dir : ',dir)
-             if dir[-1]!='/':
-                 dir=dir+'/'
-             elif dir=='':
-                 dir="./"
-             parent_dir = os.path.dirname(self.mimic_path)
-             os.chdir(parent_dir)
-
-         #####################clone git repo if doesnt exists
-         repo_url='https://github.com/healthylaife/MIMIC-IV-Data-Pipeline'
-         if os.path.exists('MIMIC-IV-Data-Pipeline-main'):
-             path_bench = './MIMIC-IV-Data-Pipeline-main'
          else:
-             path_bench ='./MIMIC-IV-Data-Pipeline-main'
-             subprocess.run(["git", "clone", repo_url, path_bench])
-             os.makedirs(path_bench+'/'+'mimic-iv')
-             shutil.move(version,path_bench+'/'+'mimic-iv')
-
-         os.chdir(path_bench)
-         self.mimic_path = './'+'mimic-iv'+'/'+version
-
-         ####################Get configurations param
-         #download config file if not custom
-         if self.config_path[0:4] == 'http':
-             c = self.config_path.split('/')[-1]
-             file_path, head = urlretrieve(self.config_path,c)
-         else :
-             file_path = self.config_path
-         if not os.path.exists('./config'):
-             os.makedirs('config')
-
-         #save config file in config folder
-         self.conf='./config/'+file_path.split('/')[-1]
-         if not os.path.exists(self.conf):
-             shutil.move(file_path,'./config')
-         with open(self.conf) as f:
-             config = yaml.safe_load(f)
-
-         timeW = config['timeWindow']
-         self.timeW=int(timeW.split()[1])
-         self.bucket = config['timebucket']
-         self.predW = config['predW']
-
-         self.data_icu = config['icu_no_icu']=='ICU'
-
-         if self.data_icu:
-             self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out, self.feat_lab = config['diagnosis'], config['chart'], config['proc'], config['meds'], config['output'], False
          else:
-             self.feat_cond, self.feat_lab, self.feat_proc, self.feat_meds, self.feat_chart, self.feat_out = config['diagnosis'], config['lab'], config['proc'], config['meds'], False, False
-
-         #####################downloads modules from hub
-         if not os.path.exists('./icd10.txt'):
-             file_path, head = urlretrieve(_ICD_CODE, "icd10.txt")
-             shutil.move(file_path, './')
-
-         if not os.path.exists('./model/data_generation_icu_modify.py'):
-             file_path, head = urlretrieve(_DATA_GEN, "data_generation_icu_modify.py")
-             shutil.move(file_path, './model')
-
-         if not os.path.exists('./model/data_generation_modify.py'):
-             file_path, head = urlretrieve(_DATA_GEN_HOSP, "data_generation_modify.py")
-             shutil.move(file_path, './model')
-
-         if not os.path.exists('./preprocessing/day_intervals_preproc/day_intervals_cohort_v22.py'):
-             file_path, head = urlretrieve(_DAY_INT, "day_intervals_cohort_v22.py")
-             shutil.move(file_path, './preprocessing/day_intervals_preproc')
-
-         data_dir = "./data/dict/"+self.config.name.replace(" ","_")+"/dataDic"
-         sys.path.append(path_bench)
-         config = self.config_path.split('/')[-1]
-
-         #####################create task cohort
-         if self.generate_cohort:
-             create_cohort(self.config.name.replace(" ","_"),self.mimic_path,config)
-
-         #####################Split data into train, test and val
-         with open(data_dir, 'rb') as fp:
-             dataDic = pickle.load(fp)
-         data = pd.DataFrame.from_dict(dataDic)
-
-         dict_dir = "./data/dict/"+self.config.name.replace(" ","_")
-
-         data=data.T
-         train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42)
-         if self.val_size > 0 :
-             train_data, val_data = train_test_split(train_data, test_size=self.val_size, random_state=42)
-             val_dic = val_data.to_dict('index')
-             val_path = dict_dir+'/val_data.pkl'
-             with open(val_path, 'wb') as f:
-                 pickle.dump(val_dic, f)
-
-         train_dic = train_data.to_dict('index')
-         test_dic = test_data.to_dict('index')
-
-         train_path = dict_dir+'/train_data.pkl'
-         test_path = dict_dir+'/test_data.pkl'
-
-         with open(train_path, 'wb') as f:
-             pickle.dump(train_dic, f)
-         with open(test_path, 'wb') as f:
-             pickle.dump(test_dic, f)
-         return dict_dir
-     def verif_dim_tensor(self, proc, out, chart, meds, lab):
-         interv = (self.timeW//self.bucket)
-         verif=True
-         if self.feat_proc:
-             if (len(proc)!= interv):
-                 verif=False
-         if self.feat_out:
-             if (len(out)!=interv):
-                 verif=False
-         if self.feat_chart:
-             if (len(chart)!=interv):
-                 verif=False
-         if self.feat_meds:
-             if (len(meds)!=interv):
-                 verif=False
-         if self.feat_lab:
-             if (len(lab)!=interv):
-                 verif=False
-         return verif
-
-     ###########################################################RAW##################################################################
-
-     def _info_raw(self):
-         features = datasets.Features(
-             {
-                 "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
-                 "gender": datasets.Value("string"),
-                 "ethnicity": datasets.Value("string"),
-                 "insurance": datasets.Value("string"),
-                 "age": datasets.Value("int32"),
-                 "COND": datasets.Sequence(datasets.Value("string")),
-                 "MEDS": {
-                     "signal": {
-                         "id": datasets.Sequence(datasets.Value("int32")),
-                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                     },
-                     "rate": {
-                         "id": datasets.Sequence(datasets.Value("int32")),
-                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                     },
-                     "amount": {
-                         "id": datasets.Sequence(datasets.Value("int32")),
-                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                     }
-                 },
-                 "PROC": {
-                     "id": datasets.Sequence(datasets.Value("int32")),
-                     "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                 },
-                 "CHART/LAB": {
-                     "signal" : {
-                         "id": datasets.Sequence(datasets.Value("int32")),
-                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                     },
-                     "val" : {
-                         "id": datasets.Sequence(datasets.Value("int32")),
-                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                     },
-                 },
-                 "OUT": {
-                     "id": datasets.Sequence(datasets.Value("int32")),
-                     "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
-                 },
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _generate_examples_raw(self, filepath):
-         with open(filepath, 'rb') as fp:
-             dataDic = pickle.load(fp)
-         for hid, data in dataDic.items():
-             proc_features = data['Proc']
-             meds_features = data['Med']
-             out_features = data['Out']
-             cond_features = data['Cond']['fids']
-             eth= data['ethnicity']
-             age = data['age']
-             gender = data['gender']
-             label = data['label']
-             insurance=data['insurance']
-
-             items = list(proc_features.keys())
-             values =[proc_features[i] for i in items ]
-             procs = {"id" : items,
-                      "value": values}
-
-             items_outs = list(out_features.keys())
-             values_outs =[out_features[i] for i in items_outs ]
-             outs = {"id" : items_outs,
-                     "value": values_outs}
-
-             if self.data_icu:
-                 chart_features = data['Chart']
-             else:
-                 chart_features = data['Lab']
-
-             #chart signal
-             if ('signal' in chart_features):
-                 items_chart_sig = list(chart_features['signal'].keys())
-                 values_chart_sig =[chart_features['signal'][i] for i in items_chart_sig ]
-                 chart_sig = {"id" : items_chart_sig,
-                              "value": values_chart_sig}
-             else:
-                 chart_sig = {"id" : [],
-                              "value": []}
-             #chart val
-             if ('val' in chart_features):
-                 items_chart_val = list(chart_features['val'].keys())
-                 values_chart_val =[chart_features['val'][i] for i in items_chart_val ]
-                 chart_val = {"id" : items_chart_val,
-                              "value": values_chart_val}
-             else:
-                 chart_val = {"id" : [],
-                              "value": []}
-
-             charts = {"signal" : chart_sig,
-                       "val" : chart_val}
-
-             #meds signal
-             if ('signal' in meds_features):
-                 items_meds_sig = list(meds_features['signal'].keys())
-                 values_meds_sig =[meds_features['signal'][i] for i in items_meds_sig ]
-                 meds_sig = {"id" : items_meds_sig,
-                             "value": values_meds_sig}
-             else:
-                 meds_sig = {"id" : [],
-                             "value": []}
-             #meds rate
-             if ('rate' in meds_features):
-                 items_meds_rate = list(meds_features['rate'].keys())
-                 values_meds_rate =[meds_features['rate'][i] for i in items_meds_rate ]
-                 meds_rate = {"id" : items_meds_rate,
-                              "value": values_meds_rate}
-             else:
-                 meds_rate = {"id" : [],
-                              "value": []}
-             #meds amount
-             if ('amount' in meds_features):
-                 items_meds_amount = list(meds_features['amount'].keys())
-                 values_meds_amount =[meds_features['amount'][i] for i in items_meds_amount ]
-                 meds_amount = {"id" : items_meds_amount,
-                                "value": values_meds_amount}
              else:
-                 meds_amount = {"id" : [],
-                                "value": []}
-
-             meds = {"signal" : meds_sig,
-                     "rate" : meds_rate,
-                     "amount" : meds_amount}
-
-             yield int(hid), {
-                 "label" : label,
-                 "gender" : gender,
-                 "ethnicity" : eth,
-                 "insurance" : insurance,
-                 "age" : age,
-                 "COND" : cond_features,
-                 "PROC" : procs,
-                 "CHART/LAB" : charts,
-                 "OUT" : outs,
-                 "MEDS" : meds
-             }
-
-     ###########################################################ENCODED##################################################################
-
-     def _info_encoded(self):
-         features = datasets.Features(
-             {
-                 "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
-                 "features" : datasets.Sequence(datasets.Value("float32")),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _generate_examples_encoded(self, filepath):
-         path= './data/dict/'+self.config.name.replace(" ","_")+'/ethVocab'
-         with open(path, 'rb') as fp:
-             ethVocab = pickle.load(fp)
-
-         path= './data/dict/'+self.config.name.replace(" ","_")+'/insVocab'
-         with open(path, 'rb') as fp:
-             insVocab = pickle.load(fp)
-
-         genVocab = ['<PAD>', 'M', 'F']
-         gen_encoder = LabelEncoder()
-         eth_encoder = LabelEncoder()
-         ins_encoder = LabelEncoder()
-         gen_encoder.fit(genVocab)
-         eth_encoder.fit(ethVocab)
-         ins_encoder.fit(insVocab)
-         with open(filepath, 'rb') as fp:
-             dico = pickle.load(fp)
-
-         df = pd.DataFrame.from_dict(dico, orient='index')
-         for i, data in df.iterrows():
-             dyn_df,cond_df,demo=concat_data(data,self.config.name.replace(" ","_"),self.feat_cond,self.feat_proc,self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab,self.condDict, self.procDict, self.outDict, self.chartDict, self.medDict)
-             dyn=dyn_df.copy()
-             dyn.columns=dyn.columns.droplevel(0)
-             concat_cols = [f"{col}_{t}" for t in range(dyn.shape[0]) for col in dyn.columns]
-             demo['gender']=gen_encoder.transform(demo['gender'])
-             demo['ethnicity']=eth_encoder.transform(demo['ethnicity'])
-             demo['insurance']=ins_encoder.transform(demo['insurance'])
-             label = data['label']
-             demo=demo.drop(['label'],axis=1)
-             X= generate_ml(dyn_df,cond_df,demo,concat_cols,self.concat)
-             X=X.values[0]
-
-             interv = (self.timeW//self.bucket)
-             size_concat = self.size_cond+ self.size_proc * interv + self.size_meds * interv+ self.size_out * interv+ self.size_chart *interv+ self.size_lab * interv + 4
-             size_aggreg = self.size_cond+ self.size_proc + self.size_meds+ self.size_out+ self.size_chart+ self.size_lab + 4
-
-             if ((self.concat and len(X)==size_concat) or ((not self.concat) and len(X)==size_aggreg)):
-                 yield int(i), {
-                     "label": label,
-                     "features": X,
-                 }
-
-     ######################################################DEEP###############################################################
-     def _info_deep(self):
-         features = datasets.Features(
-             {
-                 "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
-                 "DEMO": datasets.Sequence(datasets.Value("int64")),
-                 "COND" : datasets.Sequence(datasets.Value("int64")),
-                 "MEDS" : datasets.Array2D(shape=(None, self.size_meds), dtype='int64') ,
-                 "PROC" : datasets.Array2D(shape=(None, self.size_proc), dtype='int64') ,
-                 "CHART/LAB" : datasets.Array2D(shape=(None, self.size_chart), dtype='int64') ,
-                 "OUT" : datasets.Array2D(shape=(None, self.size_out), dtype='int64') ,
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _generate_examples_deep(self, filepath):
-         with open(filepath, 'rb') as fp:
-             dico = pickle.load(fp)
-
-         for key, data in dico.items():
-             stat, demo, meds, chart, out, proc, lab, y = generate_deep(data, self.config.name.replace(" ","_"), self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab,self.condDict, self.procDict, self.outDict, self.chartDict, self.medDict)
-             if self.verif_dim_tensor(proc, out, chart, meds, lab):
-                 if self.data_icu:
-                     yield int(key), {
-                         'label': y,
-                         'DEMO': demo,
-                         'COND': stat,
-                         'MEDS': meds,
-                         'PROC': proc,
-                         'CHART/LAB': chart,
-                         'OUT': out,
-                     }
-                 else:
-                     yield int(key), {
-                         'label': y,
-                         'DEMO': demo,
-                         'COND': stat,
-                         'MEDS': meds,
-                         'PROC': proc,
-                         'CHART/LAB': lab,
-                         'OUT': out,
-                     }
-     ######################################################text##############################################################
-     def _info_text(self):
-         features = datasets.Features(
-             {
-                 "label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
-                 "text" : datasets.Value(dtype='string', id=None),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _generate_examples_text(self, filepath):
-         icd = pd.read_csv('icd10.txt',names=['code','description'],sep='\t')
-         items= pd.read_csv(self.mimic_path+'/icu/d_items.csv.gz',compression='gzip', header=0)
-         with open(filepath, 'rb') as fp:
-             dico = pickle.load(fp)
-
-         for key, data in dico.items():
-             demo_text,cond_text,chart_text,meds_text,proc_text,out_text = generate_text(data,icd,items, self.feat_cond, self.feat_chart, self.feat_meds, self.feat_proc, self.feat_out)
-
-             yield int(key),{
-                 'label' : data['label'],
-                 'text': demo_text+cond_text+chart_text+meds_text+proc_text+out_text
-             }
-
-     #############################################################################################################################
-     def _info(self):
-         self.path = self.init_cohort()
-         self.size_cond, self.size_proc, self.size_meds, self.size_out, self.size_chart, self.size_lab, eth_vocab,gender_vocab,age_vocab,ins_vocab=vocab(self.config.name.replace(" ","_"),self.feat_cond,self.feat_proc,self.feat_out,self.feat_chart,self.feat_meds,self.feat_lab)
-         self.condDict, self.procDict, self.outDict, self.chartDict, self.medDict = open_dict(self.config.name.replace(" ","_"),self.feat_cond,self.feat_proc,self.feat_out,self.feat_chart,self.feat_lab,self.feat_meds)
-         if (self.encoding == 'concat' or self.encoding =='aggreg'):
-             return self._info_encoded()
-         elif self.encoding == 'tensor' :
-             return self._info_deep()
-         elif self.encoding == 'text' :
-             return self._info_text()
          else:
-             return self._info_raw()
-
-     def _split_generators(self, dl_manager):
-         data_dir = "./data/dict/"+self.config.name.replace(" ","_")
-         if self.val_size > 0 :
-             return [
-                 datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/train_data.pkl'}),
-                 datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/val_data.pkl'}),
-                 datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/test_data.pkl'}),
-             ]
-         else :
-             return [
-                 datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/train_data.pkl'}),
-                 datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/test_data.pkl'}),
-             ]
-
-     def _generate_examples(self, filepath):
-         if (self.encoding == 'concat' or self.encoding == 'aggreg'):
-             yield from self._generate_examples_encoded(filepath)
-         elif self.encoding == 'tensor' :
-             yield from self._generate_examples_deep(filepath)
-         elif self.encoding == 'text' :
-             yield from self._generate_examples_text(filepath)
-         else :
-             yield from self._generate_examples_raw(filepath)
  import pandas as pd
  import pickle
  import numpy as np
+ import torch
+
+
+ def create_vocab(file,task):
+     with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
+         condVocab = pickle.load(fp)
+     condVocabDict={}
+     condVocabDict[0]=0
+     for val in range(len(condVocab)):
+         condVocabDict[condVocab[val]]= val+1
+
+     return condVocabDict
+
+ def gender_vocab():
+     genderVocabDict={}
+     genderVocabDict['<PAD>']=0
+     genderVocabDict['M']=1
+     genderVocabDict['F']=2
+
+     return genderVocabDict
+
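
For reference, create_vocab reserves index 0 for padding (the condVocabDict[0]=0 entry) and numbers the vocabulary entries from 1. A minimal sketch of the same mapping, with an invented ethnicity list standing in for the pickled vocabulary file:

    eth_vocab = ['WHITE', 'BLACK/AFRICAN AMERICAN', 'HISPANIC/LATINO']  # invented
    eth_dict = {0: 0}                        # slot 0 reserved for padding
    for idx, val in enumerate(eth_vocab):
        eth_dict[val] = idx + 1              # entries are indexed from 1
    # eth_dict == {0: 0, 'WHITE': 1, 'BLACK/AFRICAN AMERICAN': 2, 'HISPANIC/LATINO': 3}
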
+ def vocab(task,diag_flag,proc_flag,out_flag,chart_flag,med_flag,lab_flag):
+     condVocabDict={}
+     procVocabDict={}
+     medVocabDict={}
+     outVocabDict={}
+     chartVocabDict={}
+     labVocabDict={}
+     ethVocabDict={}
+     ageVocabDict={}
+     genderVocabDict={}
+     insVocabDict={}
+
+     ethVocabDict=create_vocab('ethVocab',task)
+     with open('./data/dict/'+task+'/ethVocabDict', 'wb') as fp:
+         pickle.dump(ethVocabDict, fp)
+
+     ageVocabDict=create_vocab('ageVocab',task)
+     with open('./data/dict/'+task+'/ageVocabDict', 'wb') as fp:
+         pickle.dump(ageVocabDict, fp)
+
+     genderVocabDict=gender_vocab()
+     with open('./data/dict/'+task+'/genderVocabDict', 'wb') as fp:
+         pickle.dump(genderVocabDict, fp)
+
+     insVocabDict=create_vocab('insVocab',task)
+     with open('./data/dict/'+task+'/insVocabDict', 'wb') as fp:
+         pickle.dump(insVocabDict, fp)
+
+     if diag_flag:
+         file='condVocab'
+         with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
+             condVocabDict = pickle.load(fp)
+     if proc_flag:
+         file='procVocab'
+         with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
+             procVocabDict = pickle.load(fp)
+     if med_flag:
+         file='medVocab'
+         with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
+             medVocabDict = pickle.load(fp)
+     if out_flag:
+         file='outVocab'
+         with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
+             outVocabDict = pickle.load(fp)
+     if chart_flag:
+         file='chartVocab'
+         with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
+             chartVocabDict = pickle.load(fp)
+     if lab_flag:
+         file='labsVocab'
+         with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
+             labVocabDict = pickle.load(fp)
+
+     return len(condVocabDict),len(procVocabDict),len(medVocabDict),len(outVocabDict),len(chartVocabDict),len(labVocabDict),ethVocabDict,genderVocabDict,ageVocabDict,insVocabDict
+
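
A hedged usage sketch for vocab: it assumes the pickled vocabulary lists written by the cohort-creation step already exist under ./data/dict/<task>/, so the snippet fakes them with tiny invented lists first (importing vocab from this module is also an assumption about the working directory):

    import os, pickle
    from dataset_utils import vocab  # assumes this file is importable as dataset_utils

    os.makedirs('./data/dict/Mortality', exist_ok=True)
    # Invented stand-ins for the files the pipeline normally writes.
    for name, content in [('condVocab', ['I10', 'E11']), ('ethVocab', ['WHITE']),
                          ('ageVocab', [65, 70]), ('insVocab', ['Medicare'])]:
        with open('./data/dict/Mortality/' + name, 'wb') as fp:
            pickle.dump(content, fp)

    sizes_and_vocabs = vocab('Mortality', True, False, False, False, False, False)
    print(sizes_and_vocabs[0])  # size of the condition vocabulary -> 2
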
+ def open_dict(task,cond, proc, out, chart, lab, med):
+     if cond:
+         with open("./data/dict/"+task+"/condVocab", 'rb') as fp:
+             condDict = pickle.load(fp)
+     else:
+         condDict = None
+     if proc:
+         with open("./data/dict/"+task+"/procVocab", 'rb') as fp:
+             procDict = pickle.load(fp)
+     else:
+         procDict = None
+     if out:
+         with open("./data/dict/"+task+"/outVocab", 'rb') as fp:
+             outDict = pickle.load(fp)
+     else:
+         outDict = None
+     if chart:
+         with open("./data/dict/"+task+"/chartVocab", 'rb') as fp:
+             chartDict = pickle.load(fp)
+     elif lab:
+         with open("./data/dict/"+task+"/labsVocab", 'rb') as fp:
+             chartDict = pickle.load(fp)
+     else:
+         chartDict = None
+     if med:
+         with open("./data/dict/"+task+"/medVocab", 'rb') as fp:
+             medDict = pickle.load(fp)
+     else:
+         medDict = None
+
+     return condDict, procDict, outDict, chartDict, medDict
+
+ def concat_data(data,task,feat_cond,feat_proc,feat_out,feat_chart,feat_meds,feat_lab,condDict, procDict, outDict, chartDict, medDict):
+     meds=data['Med']
+     proc = data['Proc']
+     out = data['Out']
+     chart = data['Chart']
+     cond= data['Cond']['fids']
+
+     cond_df=pd.DataFrame()
+     proc_df=pd.DataFrame()
+     out_df=pd.DataFrame()
+     chart_df=pd.DataFrame()
+     meds_df=pd.DataFrame()
+
+     #demographic
+     demo=pd.DataFrame(columns=['Age','gender','ethnicity','label','insurance'])
+     new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']}
+     demo = demo.append(new_row, ignore_index=True)
+
+     ##########COND#########
+     if (feat_cond):
+         conds=pd.DataFrame(condDict,columns=['COND'])
+         features=pd.DataFrame(np.zeros([1,len(conds)]),columns=conds['COND'])
+
+         #onehot encode
+         if(cond ==[]):
+             cond_df=pd.DataFrame(np.zeros([1,len(features)]),columns=features['COND'])
+             cond_df=cond_df.fillna(0)
          else:
+             cond_df=pd.DataFrame(cond,columns=['COND'])
+             cond_df['val']=1
+             cond_df=(cond_df.drop_duplicates()).pivot(columns='COND',values='val').reset_index(drop=True)
+             cond_df=cond_df.fillna(0)
+             oneh = cond_df.sum().to_frame().T
+             combined_df = pd.concat([features,oneh],ignore_index=True).fillna(0)
+             combined_oneh=combined_df.sum().to_frame().T
+             cond_df=combined_oneh
+             for c in cond_df.columns :
+                 if c not in features:
+                     cond_df=cond_df.drop(columns=[c])
+
+     ##########PROC#########
+     if (feat_proc):
+         if proc :
+             feat=proc.keys()
+             proc_val=[proc[key] for key in feat]
+             procedures=pd.DataFrame(procDict,columns=['PROC'])
+             features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC'])
+             procs=pd.DataFrame(columns=feat)
+             for p,v in zip(feat,proc_val):
+                 procs[p]=v
+             features=features.drop(columns=procs.columns.to_list())
+             proc_df = pd.concat([features,procs],axis=1).fillna(0)
+             proc_df.columns=pd.MultiIndex.from_product([["PROC"], proc_df.columns])
          else:
+             procedures=pd.DataFrame(procDict,columns=['PROC'])
+             features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC'])
+             features.columns=pd.MultiIndex.from_product([["PROC"], features.columns])
+             proc_df=features.fillna(0)
+
+     ##########OUT#########
+     if (feat_out):
+         if out :
+             feat=out.keys()
+             out_val=[out[key] for key in feat]
+             outputs=pd.DataFrame(outDict,columns=['OUT'])
+             features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT'])
+             outs=pd.DataFrame(columns=feat)
+             for o,v in zip(feat,out_val):
+                 outs[o]=v
+             features=features.drop(columns=outs.columns.to_list())
+             out_df = pd.concat([features,outs],axis=1).fillna(0)
+             out_df.columns=pd.MultiIndex.from_product([["OUT"], out_df.columns])
          else:
+             outputs=pd.DataFrame(outDict,columns=['OUT'])
+             features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT'])
+             features.columns=pd.MultiIndex.from_product([["OUT"], features.columns])
+             out_df=features.fillna(0)
+
+     ##########CHART#########
+     if (feat_chart):
+         if chart:
+             charts=chart['val']
+             feat=charts.keys()
+             chart_val=[charts[key] for key in feat]
+             charts=pd.DataFrame(chartDict,columns=['CHART'])
+             features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART'])
+             chart=pd.DataFrame(columns=feat)
+             for c,v in zip(feat,chart_val):
+                 chart[c]=v
+             features=features.drop(columns=chart.columns.to_list())
+             chart_df = pd.concat([features,chart],axis=1).fillna(0)
+             chart_df.columns=pd.MultiIndex.from_product([["CHART"], chart_df.columns])
+         else:
+             charts=pd.DataFrame(chartDict,columns=['CHART'])
+             features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART'])
+             features.columns=pd.MultiIndex.from_product([["CHART"], features.columns])
+             chart_df=features.fillna(0)
+
+     ##########LAB#########
+     if (feat_lab):
+         if chart:
+             charts=chart['val']
+             feat=charts.keys()
+             chart_val=[charts[key] for key in feat]
+             charts=pd.DataFrame(chartDict,columns=['LAB'])
+             features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['LAB'])
+             chart=pd.DataFrame(columns=feat)
+             for c,v in zip(feat,chart_val):
+                 chart[c]=v
+             features=features.drop(columns=chart.columns.to_list())
+             chart.columns=pd.MultiIndex.from_product([["LAB"], chart.columns])
+             chart_df = pd.concat([features,chart],axis=1).fillna(0)
+             chart_df.columns=pd.MultiIndex.from_product([["LAB"], chart_df.columns])
+         else:
+             charts=pd.DataFrame(chartDict,columns=['LAB'])
+             features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['LAB'])
+             features.columns=pd.MultiIndex.from_product([["LAB"], features.columns])
+             chart_df=features.fillna(0)
+
+     ###MEDS
+     if (feat_meds):
+         if meds:
+             feat=meds['signal'].keys()
+             med_val=[meds['amount'][key] for key in feat]
+             meds=pd.DataFrame(medDict,columns=['MEDS'])
+             features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS'])
+             med=pd.DataFrame(columns=feat)
+             for m,v in zip(feat,med_val):
+                 med[m]=v
+             features=features.drop(columns=med.columns.to_list())
+             meds_df = pd.concat([features,med],axis=1).fillna(0)
+             meds_df.columns=pd.MultiIndex.from_product([["MEDS"], meds_df.columns])
+         else:
+             meds=pd.DataFrame(medDict,columns=['MEDS'])
+             features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS'])
+             features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns])
+             meds_df=features.fillna(0)
+
+     dyn_df = pd.concat([meds_df,proc_df,out_df,chart_df], axis=1)
+     return dyn_df,cond_df,demo
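
The COND branch above one-hot encodes a stay's diagnosis codes against the full condition vocabulary. A simplified, self-contained sketch of that step (vocabulary and codes invented):

    import numpy as np
    import pandas as pd

    condDict = ['I10', 'E11', 'N18']  # invented full vocabulary
    cond = ['E11']                    # invented codes for one stay

    features = pd.DataFrame(np.zeros([1, len(condDict)]), columns=condDict)
    onehot = pd.DataFrame(cond, columns=['COND'])
    onehot['val'] = 1
    onehot = onehot.drop_duplicates().pivot(columns='COND', values='val').reset_index(drop=True)
    oneh = onehot.fillna(0).sum().to_frame().T
    cond_row = pd.concat([features, oneh], ignore_index=True).fillna(0).sum().to_frame().T
    # cond_row is a single row: I10=0.0, E11=1.0, N18=0.0
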
+ def generate_deep(data,task,feat_cond,feat_proc,feat_out,feat_chart,feat_meds,feat_lab,condDict, procDict, outDict, chartDict, medDict):
+     meds = []
+     charts = []
+     proc = []
+     out = []
+     lab = []
+     stat = []
+     demo = []
+
+     size_cond, size_proc, size_meds, size_out, size_chart, size_lab, eth_vocab,gender_vocab,age_vocab,ins_vocab=vocab(task.replace(" ","_"),feat_cond,feat_proc,feat_out,feat_chart,feat_meds,False)
+     dyn,cond_df,demo=concat_data(data,task.replace(" ","_"),feat_cond,feat_proc,feat_out,feat_chart,feat_meds,feat_lab,condDict, procDict, outDict, chartDict, medDict)
+     if feat_chart:
+         charts = dyn['CHART'].values
+     if feat_meds:
+         meds = dyn['MEDS'].values
+     if feat_proc:
+         proc = dyn['PROC'].values
+     if feat_out:
+         out = dyn['OUT'].values
+     if feat_lab:
+         lab = dyn['LAB'].values
+     if feat_cond:
+         stat=cond_df.values[0]
+     y = int(demo['label'])
+
+     demo["gender"].replace(gender_vocab, inplace=True)
+     demo["ethnicity"].replace(eth_vocab, inplace=True)
+     demo["insurance"].replace(ins_vocab, inplace=True)
+     demo["Age"].replace(age_vocab, inplace=True)
+     demo=demo[["gender","ethnicity","insurance","Age"]]
+     demo = demo.values[0]
+     return stat, demo, meds, charts, out, proc, lab, y
+
+ def generate_ml(dyn, stat, demo, concat_cols, concat):
+     X_df = pd.DataFrame()
+
+     if concat:
+         dyna = dyn.copy()
+         dyna.columns = dyna.columns.droplevel(0)
+         dyna = dyna.to_numpy()
+         dyna = np.nan_to_num(dyna, copy=False)
+         dyna = dyna.reshape(1, -1)
+         dyn_df = pd.DataFrame(data=dyna, columns=concat_cols)
+     else:
+         dyn_df_list = []
+
+         for key in dyn.columns.levels[0]:
+             dyn_temp = dyn[key]
+             if key in ["CHART", "MEDS"]:
+                 agg = np.nanmean(dyn_temp.to_numpy(), axis=0)
              else:
+                 agg = np.nanmax(dyn_temp.to_numpy(), axis=0)
+
+             agg = pd.DataFrame(data=agg.reshape(1, -1), columns=dyn_temp.columns)
+             dyn_df_list.append(agg)
+
+         dyn_df = pd.concat(dyn_df_list, axis=0, ignore_index=True)
+
+     X_df = pd.concat([dyn_df, stat, demo], axis=1)
+     return X_df
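
A self-contained sketch of generate_ml on invented inputs (calling it assumes this module has been imported). With concat=True the time buckets are flattened into <feature>_<t> columns; with concat=False each feature group is aggregated to one value per column, via nanmean for CHART and MEDS and nanmax otherwise:

    import pandas as pd

    # Invented two-bucket window with one MEDS and one PROC column.
    cols = pd.MultiIndex.from_tuples([('MEDS', 'm1'), ('PROC', 'p1')])
    dyn = pd.DataFrame([[1.0, 0.0], [3.0, 1.0]], columns=cols)
    stat = pd.DataFrame({'I10': [1.0]})
    demo = pd.DataFrame({'gender': [1], 'ethnicity': [0], 'insurance': [2], 'Age': [65]})
    concat_cols = [f"{c}_{t}" for t in range(dyn.shape[0]) for c in dyn.columns.droplevel(0)]

    X = generate_ml(dyn, stat, demo, concat_cols, concat=True)
    # X has one row and 2*2 + 1 + 4 = 9 columns: m1_0, p1_0, m1_1, p1_1, I10, gender, ...
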
+
+
+ def generate_text(data,icd,items,feat_cond,feat_chart,feat_meds, feat_proc, feat_out):
+     #Demographics
+     age = data['age']
+     gender=data['gender']
+     ethn=data['ethnicity']
+     ins=data['insurance']
+     demo_text = f'The patient was a {ethn} {gender} with {ins} insurance and was {age} years old. '
+
+     #Diagnosis
+     if feat_cond:
+         conds = data.get('Cond', {}).get('fids', [])
+         conds=[icd[icd['code'] == code]['description'].to_string(index=False) for code in conds if not icd[icd['code'] == code].empty]
+         cond_text = '; '.join(conds)
+         cond_text = f"The patient was diagnosed with {cond_text}." if cond_text else ''
+     else:
+         cond_text = ''
+
+     #chart
+     if feat_chart:
+         chart = data.get('Chart', {})
+         if chart:
+             charts = chart.get('val', {})
+             feat = charts.keys()
+             chart_val = [charts[key] for key in feat]
+             chart_mean = [round(np.mean(c), 3) for c in chart_val]
+             feat_text = [(items[items['itemid'] == f]['label']).to_string(index=False) for f in feat]
+             chart_text = '; '.join(f"{mean_val} for {feat_label}" for mean_val, feat_label in zip(chart_mean, feat_text))
+             chart_text = f"The chart events measured were: {chart_text}."
          else:
+             chart_text = ''
+     else:
+         chart_text = ''
+
+     #meds
+     if feat_meds:
+         meds = data.get('Med', {})
+         if meds:
+             feat = meds['signal'].keys()
+             meds_val = [meds['amount'][key] for key in feat]
+             meds_mean = [round(np.mean(c), 3) for c in meds_val]
+             feat_text = [(items[items['itemid'] == f]['label']).to_string(index=False) for f in feat]
+             meds_text = '; '.join(f"{mean_val} of {feat_label}" for mean_val, feat_label in zip(meds_mean, feat_text))
+             meds_text = f"The mean amounts of medications administered during the episode were: {meds_text}."
+         else:
+             meds_text = ''
+     else:
+         meds_text = ''
+
+     #proc
+     if feat_proc:
+         proc = data['Proc']
+         if proc:
+             feat=proc.keys()
+             feat_text = [(items[items['itemid']==f]['label']).to_string(index=False) for f in feat]
+             template = 'The procedures performed were: {}.'
+             proc_text= template.format(';'.join(feat_text))
+         else:
+             proc_text=''
+     else:
+         proc_text=''
+
+     #out
+     if feat_out:
+         out = data['Out']
+         if out:
+             feat=out.keys()
+             feat_text = [(items[items['itemid']==f]['label']).to_string(index=False) for f in feat]
+             template ='The outputs collected were: {}.'
+             out_text = template.format('; '.join(feat_text))
+         else:
+             out_text=''
+     else:
+         out_text=''
+
+     return demo_text, cond_text,chart_text,meds_text,proc_text,out_text
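
A self-contained sketch of generate_text with invented lookup tables and a minimal stay record; in the builder, the icd frame comes from icd10.txt and items from MIMIC-IV's icu/d_items.csv.gz:

    import pandas as pd

    icd = pd.DataFrame({'code': ['I10'], 'description': ['Essential hypertension']})
    items = pd.DataFrame({'itemid': [220045], 'label': ['Heart Rate']})
    data = {'age': 67, 'gender': 'M', 'ethnicity': 'WHITE', 'insurance': 'Medicare',
            'Cond': {'fids': ['I10']},
            'Chart': {'val': {220045: [80.0, 84.0, 86.0]}}}

    parts = generate_text(data, icd, items, feat_cond=True, feat_chart=True,
                          feat_meds=False, feat_proc=False, feat_out=False)
    print(''.join(parts))
    # "The patient was a WHITE M with Medicare insurance and was 67 years old. ..."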
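
For context, the builder removed above received its options as extra keyword arguments to datasets.load_dataset (they are popped in its __init__). A hedged sketch of that invocation; the path is a placeholder:

    from datasets import load_dataset

    ds = load_dataset('thbndi/Mimic4Dataset', 'Mortality',
                      mimic_path='path/to/mimic4data/from/username/mimiciv/2.2',
                      encoding='concat',  # 'concat', 'aggreg', 'tensor', 'text'; anything else yields raw examples
                      test_size=0.2, val_size=0.1,
                      generate_cohort=True)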