thbndi committed
Commit
cb07198
1 Parent(s): 58cd6d7

Upload 4 files

Files changed (4)
  1. Mimic4Dataset.py +528 -0
  2. check_config.py +118 -0
  3. dataset_utils.py +345 -0
  4. task_cohort.py +173 -0
Mimic4Dataset.py ADDED
@@ -0,0 +1,528 @@
+ import os
+ import sys
+ import pickle
+ import subprocess
+ import shutil
+ from urllib.request import urlretrieve
+
+ import pandas as pd
+ import yaml
+ import datasets
+ from sklearn.model_selection import train_test_split
+ from sklearn.preprocessing import LabelEncoder
+
+ from .task_cohort import task_cohort
+ from .dataset_utils import *
+
+
+ _DESCRIPTION = """\
+ Dataset for MIMIC-IV data, by default for the Mortality task.
+ Available tasks are: Mortality, Length of Stay, Readmission, Phenotype.
+ The data is extracted from the MIMIC-IV database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
+ mimic_path should have this form: "path/to/mimic4data/from/username/mimiciv/2.2"
+ If you choose a custom task, provide a configuration file for the time series.
+ Currently works with MIMIC-IV versions 1 and 2.
+ """
+ _BASE_URL = "https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main"
+ _HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
+
+ _CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
+ _GIT_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline"
+ _CONFIG_URLS = {
+     'los': f"{_BASE_URL}/config/los.config",
+     'mortality': f"{_BASE_URL}/config/mortality.config",
+     'phenotype': f"{_BASE_URL}/config/phenotype.config",
+     'readmission': f"{_BASE_URL}/config/readmission.config",
+ }
+
+
+ class Mimic4DatasetConfig(datasets.BuilderConfig):
+     """BuilderConfig for Mimic4Dataset."""
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+
+ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
+     """Create the Mimic4Dataset dataset from MIMIC-IV data stored on the user's machine."""
+     VERSION = datasets.Version("1.0.0")
+
+     def __init__(self, **kwargs):
+         self.mimic_path = kwargs.pop("mimic_path", None)
+         self.encoding = kwargs.pop("encoding", 'concat')
+         self.config_path = kwargs.pop("config_path", None)
+         self.test_size = kwargs.pop("test_size", 0.2)
+         self.val_size = kwargs.pop("val_size", 0.1)
+         self.generate_cohort = kwargs.pop("generate_cohort", True)
+         self.concat = (self.encoding == 'concat')
+         super().__init__(**kwargs)
+
+     BUILDER_CONFIGS = [
+         Mimic4DatasetConfig(
+             name="Phenotype",
+             version=VERSION,
+             description="Dataset for the MIMIC-IV Phenotype task",
+         ),
+         Mimic4DatasetConfig(
+             name="Readmission",
+             version=VERSION,
+             description="Dataset for the MIMIC-IV Readmission task",
+         ),
+         Mimic4DatasetConfig(
+             name="Length of Stay",
+             version=VERSION,
+             description="Dataset for the MIMIC-IV Length of Stay task",
+         ),
+         Mimic4DatasetConfig(
+             name="Mortality",
+             version=VERSION,
+             description="Dataset for the MIMIC-IV Mortality task",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "Mortality"
+
+     def create_cohort(self):
+         # fall back to the hosted default config for the selected task
+         if self.config_path is None:
+             if self.config.name == 'Phenotype': self.config_path = _CONFIG_URLS['phenotype']
+             if self.config.name == 'Readmission': self.config_path = _CONFIG_URLS['readmission']
+             if self.config.name == 'Length of Stay': self.config_path = _CONFIG_URLS['los']
+             if self.config.name == 'Mortality': self.config_path = _CONFIG_URLS['mortality']
+
+         version = self.mimic_path.split('/')[-1]
+
+         current_directory = os.getcwd()
+         if os.path.exists(os.path.dirname(current_directory) + '/MIMIC-IV-Data-Pipeline-main'):
+             os.chdir(os.path.dirname(current_directory))
+         else:
+             # move to the parent directory of the mimic data (the folder containing the version directory)
+             parent_dir = os.path.dirname(self.mimic_path)
+             os.chdir(parent_dir)
+
+         # clone the pipeline repo if it does not exist yet
+         repo_url = 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline'
+         path_bench = './MIMIC-IV-Data-Pipeline-main'
+         if not os.path.exists(path_bench):
+             subprocess.run(["git", "clone", repo_url, path_bench])
+             os.makedirs(path_bench + '/mimic-iv')
+             shutil.move(version, path_bench + '/mimic-iv')
+
+         os.chdir(path_bench)
+         self.mimic_path = './mimic-iv/' + version
+
+         # get configuration parameters: download the config file unless a custom one was given
+         if self.config_path[0:4] == 'http':
+             c = self.config_path.split('/')[-1]
+             file_path, head = urlretrieve(self.config_path, c)
+         else:
+             file_path = self.config_path
+
+         if not os.path.exists('./config'):
+             os.makedirs('config')
+         # save the config file in the config folder
+         self.conf = './config/' + file_path.split('/')[-1]
+         if not os.path.exists(self.conf):
+             shutil.move(file_path, './config')
+         with open(self.conf) as f:
+             config = yaml.safe_load(f)
+         timeW = config['timeWindow']
+         self.timeW = int(timeW.split()[1])
+         self.bucket = config['timebucket']
+         self.data_icu = config['icu_no_icu'] == 'ICU'
+         if self.data_icu:
+             self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out = config['diagnosis'], config['chart'], config['proc'], config['meds'], config['output']
+             self.feat_lab = False
+         else:
+             self.feat_cond, self.feat_lab, self.feat_proc, self.feat_meds = config['diagnosis'], config['lab'], config['proc'], config['meds']
+             self.feat_out = False
+             self.feat_chart = False
+
+         data_dir = "./data/dict/" + self.config.name.replace(" ", "_") + "/dataDic"
+         sys.path.append(path_bench)
+         config_file = self.config_path.split('/')[-1]
+
+         # create the task cohort
+         if self.generate_cohort:
+             task_cohort(self.config.name.replace(" ", "_"), self.mimic_path, config_file)
+
+         # split the data into train, test and val
+         with open(data_dir, 'rb') as fp:
+             dataDic = pickle.load(fp)
+         data = pd.DataFrame.from_dict(dataDic)
+
+         dict_dir = "./data/dict/" + self.config.name.replace(" ", "_")
+
+         data = data.T
+         train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42)
+         if self.val_size > 0:
+             train_data, val_data = train_test_split(train_data, test_size=self.val_size, random_state=42)
+             val_dic = val_data.to_dict('index')
+             val_path = dict_dir + '/val_data.pkl'
+             with open(val_path, 'wb') as f:
+                 pickle.dump(val_dic, f)
+
+         train_dic = train_data.to_dict('index')
+         test_dic = test_data.to_dict('index')
+
+         train_path = dict_dir + '/train_data.pkl'
+         test_path = dict_dir + '/test_data.pkl'
+
+         with open(train_path, 'wb') as f:
+             pickle.dump(train_dic, f)
+
+         with open(test_path, 'wb') as f:
+             pickle.dump(test_dic, f)
+
+         return dict_dir
+
+     ########################################################### RAW ###########################################################
+
+     def _info_raw(self):
+         features = datasets.Features(
+             {
+                 "label": datasets.ClassLabel(num_classes=2, names=["0", "1"]),
+                 "gender": datasets.Value("string"),
+                 "ethnicity": datasets.Value("string"),
+                 "insurance": datasets.Value("string"),
+                 "age": datasets.Value("int32"),
+                 "COND": datasets.Sequence(datasets.Value("string")),
+                 "MEDS": {
+                     "signal": {
+                         "id": datasets.Sequence(datasets.Value("int32")),
+                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                     },
+                     "rate": {
+                         "id": datasets.Sequence(datasets.Value("int32")),
+                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                     },
+                     "amount": {
+                         "id": datasets.Sequence(datasets.Value("int32")),
+                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                     },
+                 },
+                 "PROC": {
+                     "id": datasets.Sequence(datasets.Value("int32")),
+                     "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                 },
+                 "CHART/LAB": {
+                     "signal": {
+                         "id": datasets.Sequence(datasets.Value("int32")),
+                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                     },
+                     "val": {
+                         "id": datasets.Sequence(datasets.Value("int32")),
+                         "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                     },
+                 },
+                 "OUT": {
+                     "id": datasets.Sequence(datasets.Value("int32")),
+                     "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
+                 },
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _generate_examples_raw(self, filepath):
+         with open(filepath, 'rb') as fp:
+             dataDic = pickle.load(fp)
+         for hid, data in dataDic.items():
+             proc_features = data['Proc']
+             meds_features = data['Med']
+             out_features = data['Out']
+             cond_features = data['Cond']['fids']
+             eth = data['ethnicity']
+             age = data['age']
+             gender = data['gender']
+             label = data['label']
+             insurance = data['insurance']
+
+             items = list(proc_features.keys())
+             values = [proc_features[i] for i in items]
+             procs = {"id": items, "value": values}
+
+             items_outs = list(out_features.keys())
+             values_outs = [out_features[i] for i in items_outs]
+             outs = {"id": items_outs, "value": values_outs}
+
+             if self.data_icu:
+                 chart_features = data['Chart']
+             else:
+                 chart_features = data['Lab']
+
+             # chart signal
+             if 'signal' in chart_features:
+                 items_chart_sig = list(chart_features['signal'].keys())
+                 values_chart_sig = [chart_features['signal'][i] for i in items_chart_sig]
+                 chart_sig = {"id": items_chart_sig, "value": values_chart_sig}
+             else:
+                 chart_sig = {"id": [], "value": []}
+             # chart val
+             if 'val' in chart_features:
+                 items_chart_val = list(chart_features['val'].keys())
+                 values_chart_val = [chart_features['val'][i] for i in items_chart_val]
+                 chart_val = {"id": items_chart_val, "value": values_chart_val}
+             else:
+                 chart_val = {"id": [], "value": []}
+
+             charts = {"signal": chart_sig, "val": chart_val}
+
+             # meds signal
+             if 'signal' in meds_features:
+                 items_meds_sig = list(meds_features['signal'].keys())
+                 values_meds_sig = [meds_features['signal'][i] for i in items_meds_sig]
+                 meds_sig = {"id": items_meds_sig, "value": values_meds_sig}
+             else:
+                 meds_sig = {"id": [], "value": []}
+             # meds rate
+             if 'rate' in meds_features:
+                 items_meds_rate = list(meds_features['rate'].keys())
+                 values_meds_rate = [meds_features['rate'][i] for i in items_meds_rate]
+                 meds_rate = {"id": items_meds_rate, "value": values_meds_rate}
+             else:
+                 meds_rate = {"id": [], "value": []}
+             # meds amount
+             if 'amount' in meds_features:
+                 items_meds_amount = list(meds_features['amount'].keys())
+                 values_meds_amount = [meds_features['amount'][i] for i in items_meds_amount]
+                 meds_amount = {"id": items_meds_amount, "value": values_meds_amount}
+             else:
+                 meds_amount = {"id": [], "value": []}
+
+             meds = {"signal": meds_sig, "rate": meds_rate, "amount": meds_amount}
+
+             yield int(hid), {
+                 "label": label,
+                 "gender": gender,
+                 "ethnicity": eth,
+                 "insurance": insurance,
+                 "age": age,
+                 "COND": cond_features,
+                 "PROC": procs,
+                 "CHART/LAB": charts,
+                 "OUT": outs,
+                 "MEDS": meds,
+             }
+
+     ########################################################### ENCODED ###########################################################
+
+     def _info_encoded(self):
+         features = datasets.Features(
+             {
+                 "label": datasets.ClassLabel(num_classes=2, names=["0", "1"]),
+                 "features": datasets.Sequence(datasets.Value("float32")),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _generate_examples_encoded(self, filepath):
+         path = './data/dict/' + self.config.name.replace(" ", "_") + '/ethVocab'
+         with open(path, 'rb') as fp:
+             ethVocab = pickle.load(fp)
+
+         path = './data/dict/' + self.config.name.replace(" ", "_") + '/insVocab'
+         with open(path, 'rb') as fp:
+             insVocab = pickle.load(fp)
+
+         genVocab = ['<PAD>', 'M', 'F']
+         gen_encoder = LabelEncoder()
+         eth_encoder = LabelEncoder()
+         ins_encoder = LabelEncoder()
+         gen_encoder.fit(genVocab)
+         eth_encoder.fit(ethVocab)
+         ins_encoder.fit(insVocab)
+         with open(filepath, 'rb') as fp:
+             dico = pickle.load(fp)
+
+         df = pd.DataFrame.from_dict(dico, orient='index')
+         task = self.config.name.replace(" ", "_")
+
+         for i, data in df.iterrows():
+             concat_cols = []
+             dyn_df, cond_df, demo = concat_data(data, task, self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds, self.feat_lab)
+             dyn = dyn_df.copy()
+             dyn.columns = dyn.columns.droplevel(0)
+             cols = dyn.columns
+             time = dyn.shape[0]
+             # one column per (feature, time step) for the flattened representation
+             for t in range(time):
+                 cols_t = [str(x) + "_" + str(t) for x in cols]
+                 concat_cols.extend(cols_t)
+             demo['gender'] = gen_encoder.transform(demo['gender'])
+             demo['ethnicity'] = eth_encoder.transform(demo['ethnicity'])
+             demo['insurance'] = ins_encoder.transform(demo['insurance'])
+             label = data['label']
+             demo = demo.drop(['label'], axis=1)
+             X = generate_ml(dyn_df, cond_df, demo, concat_cols, self.concat)
+             X = X.values.tolist()[0]
+             yield int(i), {
+                 "label": label,
+                 "features": X,
+             }
+
+     ########################################################### DEEP ###########################################################
+
+     def _info_deep(self):
+         features = datasets.Features(
+             {
+                 "label": datasets.ClassLabel(num_classes=2, names=["0", "1"]),
+                 "DEMO": datasets.Sequence(datasets.Value("int64")),
+                 "COND": datasets.Sequence(datasets.Value("int64")),
+                 "MEDS": datasets.Array2D(shape=(None, self.size_meds), dtype='int64'),
+                 "PROC": datasets.Array2D(shape=(None, self.size_proc), dtype='int64'),
+                 "CHART/LAB": datasets.Array2D(shape=(None, self.size_chart), dtype='int64'),
+                 "OUT": datasets.Array2D(shape=(None, self.size_out), dtype='int64'),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _generate_examples_deep(self, filepath):
+         with open(filepath, 'rb') as fp:
+             dico = pickle.load(fp)
+         task = self.config.name.replace(" ", "_")
+         for key, data in dico.items():
+             stat, demo, meds, chart, out, proc, lab, y = generate_deep(data, task, self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds, self.feat_lab)
+
+             # keep only stays whose time series cover the full time window
+             valid = True
+             if self.feat_proc and len(proc) < (self.timeW // self.bucket):
+                 valid = False
+             if self.feat_out and len(out) < (self.timeW // self.bucket):
+                 valid = False
+             if self.feat_chart and len(chart) < (self.timeW // self.bucket):
+                 valid = False
+             if self.feat_meds and len(meds) < (self.timeW // self.bucket):
+                 valid = False
+             if self.feat_lab and len(lab) < (self.timeW // self.bucket):
+                 valid = False
+             if valid:
+                 if self.data_icu:
+                     yield int(key), {
+                         'label': y,
+                         'DEMO': demo,
+                         'COND': stat,
+                         'MEDS': meds,
+                         'PROC': proc,
+                         'CHART/LAB': chart,
+                         'OUT': out,
+                     }
+                 else:
+                     yield int(key), {
+                         'label': y,
+                         'DEMO': demo,
+                         'COND': stat,
+                         'MEDS': meds,
+                         'PROC': proc,
+                         'CHART/LAB': lab,
+                         'OUT': out,
+                     }
+
+     #############################################################################################################################
+
+     def _info(self):
+         self.path = self.create_cohort()
+         self.size_cond, self.size_proc, self.size_meds, self.size_out, self.size_chart, self.size_lab, eth_vocab, gender_vocab, age_vocab, ins_vocab = vocab(self.config.name.replace(" ", "_"), self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds, self.feat_lab)
+
+         if self.encoding == 'concat':
+             return self._info_encoded()
+         elif self.encoding == 'aggreg':
+             return self._info_encoded()
+         elif self.encoding == 'tensor':
+             return self._info_deep()
+         else:
+             return self._info_raw()
+
+     def _split_generators(self, dl_manager):
+         csv_dir = "./data/dict/" + self.config.name.replace(" ", "_")
+         if self.val_size > 0:
+             return [
+                 datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir + '/train_data.pkl'}),
+                 datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir + '/val_data.pkl'}),
+                 datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir + '/test_data.pkl'}),
+             ]
+         else:
+             return [
+                 datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir + '/train_data.pkl'}),
+                 datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir + '/test_data.pkl'}),
+             ]
+
+     def _generate_examples(self, filepath):
+         if self.encoding == 'concat':
+             yield from self._generate_examples_encoded(filepath)
+         elif self.encoding == 'aggreg':
+             yield from self._generate_examples_encoded(filepath)
+         elif self.encoding == 'tensor':
+             yield from self._generate_examples_deep(filepath)
+         else:
+             yield from self._generate_examples_raw(filepath)
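
For context, a minimal way to load this builder might look like the sketch below; the extra keyword arguments are the ones consumed by `Mimic4Dataset.__init__` above, and the local `mimic_path` value is a placeholder, not part of the commit:

    from datasets import load_dataset

    # config name selects the task: "Mortality", "Length of Stay", "Readmission", "Phenotype"
    dataset = load_dataset(
        "thbndi/Mimic4Dataset",
        "Mortality",
        mimic_path="/path/to/mimiciv/2.2",  # placeholder: local MIMIC-IV download
        encoding="concat",                  # 'concat', 'aggreg', 'tensor', or anything else for raw
        test_size=0.2,
        val_size=0.1,
        generate_cohort=True,
        trust_remote_code=True,             # required by recent datasets versions for loading scripts
    )
    print(dataset["train"][0]["label"])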
check_config.py ADDED
@@ -0,0 +1,118 @@
+ import yaml
+
+
+ def check_config(task, config_file):
+     with open(config_file) as f:
+         config = yaml.safe_load(f)
+
+     if task == 'Phenotype':
+         disease_label = config['disease_label']
+     else:
+         disease_label = ""
+     time = config['timePrediction']
+     label = task
+     timeW = config['timeWindow']
+     include = int(timeW.split()[1])
+     bucket = config['timebucket']
+     radimp = config['radimp']
+     predW = config['predW']
+     disease_filter = config['disease_filter']
+     icu_no_icu = config['icu_no_icu']
+     groupingDiag = config['groupingDiag']
+
+     assert icu_no_icu in ['ICU', 'Non-ICU'], "Chosen data should be one of the following: ICU, Non-ICU"
+     data_icu = icu_no_icu == 'ICU'
+
+     if data_icu:
+         chart_flag = config['chart']
+         output_flag = config['output']
+         select_chart = config['select_chart']
+         lab_flag = False
+         select_lab = False
+     else:
+         lab_flag = config['lab']
+         select_lab = config['select_lab']
+         groupingMed = config['groupingMed']
+         groupingProc = config['groupingProc']
+         chart_flag = False
+         output_flag = False
+         select_chart = False
+
+     diag_flag = config['diagnosis']
+     proc_flag = config['proc']
+     meds_flag = config['meds']
+     select_diag = config['select_diag']
+     select_med = config['select_med']
+     select_proc = config['select_proc']
+     select_out = config['select_out']
+
+     outlier_removal = config['outlier_removal']
+     thresh = config['outlier']
+     left_thresh = config['left_outlier']
+
+     if data_icu:
+         assert isinstance(select_diag, bool) and isinstance(select_med, bool) and isinstance(select_proc, bool) and isinstance(select_out, bool) and isinstance(select_chart, bool), "select_diag, select_chart, select_med, select_proc, select_out should be boolean"
+         assert isinstance(chart_flag, bool) and isinstance(output_flag, bool) and isinstance(diag_flag, bool) and isinstance(proc_flag, bool) and isinstance(meds_flag, bool), "chart_flag, output_flag, diag_flag, proc_flag, meds_flag should be boolean"
+     else:
+         assert isinstance(select_diag, bool) and isinstance(select_med, bool) and isinstance(select_proc, bool) and isinstance(select_out, bool) and isinstance(select_lab, bool), "select_diag, select_lab, select_med, select_proc, select_out should be boolean"
+         assert isinstance(lab_flag, bool) and isinstance(diag_flag, bool) and isinstance(proc_flag, bool) and isinstance(meds_flag, bool), "lab_flag, diag_flag, proc_flag, meds_flag should be boolean"
+
+     if task == 'Phenotype':
+         if disease_label == 'Heart Failure':
+             label = 'Readmission'
+             time = 30
+             disease_label = 'I50'
+         elif disease_label == 'CAD':
+             label = 'Readmission'
+             time = 30
+             disease_label = 'I25'
+         elif disease_label == 'CKD':
+             label = 'Readmission'
+             time = 30
+             disease_label = 'N18'
+         elif disease_label == 'COPD':
+             label = 'Readmission'
+             time = 30
+             disease_label = 'J44'
+         else:
+             raise ValueError('Disease label not correct, provide one in the list: Heart Failure, CAD, CKD, COPD')
+         predW = 0
+         assert timeW.split()[0] == 'Last' and 24 <= include <= 72, "Time window should be between Last 24 and Last 72"
+
+     elif task == 'Mortality':
+         time = 0
+         label = 'Mortality'
+         assert 2 <= predW <= 8, "Prediction window should be between 2 and 8"
+         assert timeW.split()[0] == 'First' and 24 <= include <= 72, "Time window should be between First 24 and First 72"
+
+     elif task == 'Length_of_Stay':
+         label = 'Length of Stay'
+         assert timeW.split()[0] == 'First' and 24 <= include <= 72, "Time window should be between First 24 and First 72"
+         assert 1 <= time <= 10, "Length of stay should be between 1 and 10"
+         predW = 0
+
+     elif task == 'Readmission':
+         label = 'Readmission'
+         assert timeW.split()[0] == 'Last' and 24 <= include <= 72, "Time window should be between Last 24 and Last 72"
+         assert 10 <= time <= 150 and time % 10 == 0, "Readmission window should be between 10 and 150 with a step of 10"
+         predW = 0
+
+     else:
+         raise ValueError('Task not correct')
+
+     assert disease_filter in ['Heart Failure', 'COPD', 'CKD', 'CAD', ""], "Disease filter should be one of the following: Heart Failure, COPD, CKD, CAD or empty"
+     assert groupingDiag in ['Convert ICD-9 to ICD-10 and group ICD-10 codes', 'Keep both ICD-9 and ICD-10 codes', 'Convert ICD-9 to ICD-10 codes'], "Grouping ICD should be one of the following: Convert ICD-9 to ICD-10 and group ICD-10 codes, Keep both ICD-9 and ICD-10 codes, Convert ICD-9 to ICD-10 codes"
+     assert isinstance(bucket, int) and 1 <= bucket <= 6, "Time bucket should be an integer between 1 and 6"
+     assert radimp in ['No Imputation', 'forward fill and mean', 'forward fill and median'], "Imputation should be one of the following: No Imputation, forward fill and mean, forward fill and median"
+     if chart_flag:
+         assert isinstance(left_thresh, int) and 0 <= left_thresh <= 10, "Left outlier threshold should be an integer between 0 and 10"
+         assert isinstance(thresh, int) and 90 <= thresh <= 99, "Outlier threshold should be an integer between 90 and 99"
+         assert outlier_removal in ['No outlier detection', 'Impute Outlier (default:98)', 'Remove outliers (default:98)'], "Outlier removal should be one of the following: No outlier detection, Impute Outlier (default:98), Remove outliers (default:98)"
+     if lab_flag:
+         assert isinstance(left_thresh, int) and 0 <= left_thresh <= 10, "Left outlier threshold should be an integer between 0 and 10"
+         assert isinstance(thresh, int) and 90 <= thresh <= 99, "Outlier threshold should be an integer between 90 and 99"
+         assert outlier_removal in ['No outlier detection', 'Impute Outlier (default:98)', 'Remove outliers (default:98)'], "Outlier removal should be one of the following: No outlier detection, Impute Outlier (default:98), Remove outliers (default:98)"
+         assert groupingProc in ['ICD-9 and ICD-10', 'ICD-10'], "Grouping procedure should be one of the following: ICD-9 and ICD-10, ICD-10"
+         assert groupingMed in ['Yes', 'No'], "Do you want to group medication codes to use non-proprietary names? Grouping medication should be one of the following: Yes, No"
+
+     return label, time, disease_label, predW
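
As a quick illustration of the keys check_config expects, here is a sketch that writes a minimal ICU Mortality config and validates it; the values are assumptions modeled on the hosted .config files, not copies of them, and the import path depends on your package layout:

    import yaml
    from check_config import check_config  # adjust import to your layout

    # minimal ICU Mortality config (illustrative values only)
    cfg = {
        'timePrediction': 0, 'timeWindow': 'First 72', 'timebucket': 2,
        'radimp': 'No Imputation', 'predW': 6, 'disease_filter': "",
        'icu_no_icu': 'ICU',
        'groupingDiag': 'Convert ICD-9 to ICD-10 and group ICD-10 codes',
        'diagnosis': True, 'chart': True, 'proc': True, 'meds': True, 'output': True,
        'select_diag': False, 'select_med': False, 'select_proc': False,
        'select_out': False, 'select_chart': False,
        'outlier_removal': 'No outlier detection', 'outlier': 98, 'left_outlier': 5,
    }
    with open('mortality_demo.config', 'w') as f:
        yaml.dump(cfg, f)

    label, time_, disease_label, predW = check_config('Mortality', 'mortality_demo.config')
    print(label, predW)  # -> Mortality 6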
dataset_utils.py ADDED
@@ -0,0 +1,345 @@
+ import pickle
+
+ import numpy as np
+ import pandas as pd
+ import torch
+
+
+ def create_vocab(file, task):
+     with open('./data/dict/' + task + '/' + file, 'rb') as fp:
+         condVocab = pickle.load(fp)
+     condVocabDict = {}
+     condVocabDict[0] = 0
+     for val in range(len(condVocab)):
+         condVocabDict[condVocab[val]] = val + 1
+
+     return condVocabDict
+
+
+ def gender_vocab():
+     genderVocabDict = {}
+     genderVocabDict['<PAD>'] = 0
+     genderVocabDict['M'] = 1
+     genderVocabDict['F'] = 2
+
+     return genderVocabDict
+
+
+ def vocab(task, diag_flag, proc_flag, out_flag, chart_flag, med_flag, lab_flag):
+     condVocabDict = {}
+     procVocabDict = {}
+     medVocabDict = {}
+     outVocabDict = {}
+     chartVocabDict = {}
+     labVocabDict = {}
+
+     ethVocabDict = create_vocab('ethVocab', task)
+     with open('./data/dict/' + task + '/ethVocabDict', 'wb') as fp:
+         pickle.dump(ethVocabDict, fp)
+
+     ageVocabDict = create_vocab('ageVocab', task)
+     with open('./data/dict/' + task + '/ageVocabDict', 'wb') as fp:
+         pickle.dump(ageVocabDict, fp)
+
+     genderVocabDict = gender_vocab()
+     with open('./data/dict/' + task + '/genderVocabDict', 'wb') as fp:
+         pickle.dump(genderVocabDict, fp)
+
+     insVocabDict = create_vocab('insVocab', task)
+     with open('./data/dict/' + task + '/insVocabDict', 'wb') as fp:
+         pickle.dump(insVocabDict, fp)
+
+     if diag_flag:
+         with open('./data/dict/' + task + '/condVocab', 'rb') as fp:
+             condVocabDict = pickle.load(fp)
+     if proc_flag:
+         with open('./data/dict/' + task + '/procVocab', 'rb') as fp:
+             procVocabDict = pickle.load(fp)
+     if med_flag:
+         with open('./data/dict/' + task + '/medVocab', 'rb') as fp:
+             medVocabDict = pickle.load(fp)
+     if out_flag:
+         with open('./data/dict/' + task + '/outVocab', 'rb') as fp:
+             outVocabDict = pickle.load(fp)
+     if chart_flag:
+         with open('./data/dict/' + task + '/chartVocab', 'rb') as fp:
+             chartVocabDict = pickle.load(fp)
+     if lab_flag:
+         with open('./data/dict/' + task + '/labsVocab', 'rb') as fp:
+             labVocabDict = pickle.load(fp)
+
+     return len(condVocabDict), len(procVocabDict), len(medVocabDict), len(outVocabDict), len(chartVocabDict), len(labVocabDict), ethVocabDict, genderVocabDict, ageVocabDict, insVocabDict
+
+
+ def concat_data(data, task, feat_cond, feat_proc, feat_out, feat_chart, feat_meds, feat_lab):
+     meds = data['Med']
+     proc = data['Proc']
+     out = data['Out']
+     chart = data['Chart']
+     cond = data['Cond']['fids']
+
+     cond_df = pd.DataFrame()
+     proc_df = pd.DataFrame()
+     out_df = pd.DataFrame()
+     chart_df = pd.DataFrame()
+     meds_df = pd.DataFrame()
+
+     # demographics (single-row frame; built directly since DataFrame.append is gone in pandas 2.x)
+     new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']}
+     demo = pd.DataFrame([new_row], columns=['Age', 'gender', 'ethnicity', 'label', 'insurance'])
+
+     ########## COND ##########
+     if feat_cond:
+         # get all conditions in the vocabulary
+         with open("./data/dict/" + task + "/condVocab", 'rb') as fp:
+             conDict = pickle.load(fp)
+         conds = pd.DataFrame(conDict, columns=['COND'])
+         features = pd.DataFrame(np.zeros([1, len(conds)]), columns=conds['COND'])
+
+         # one-hot encode
+         if cond == []:
+             # no recorded conditions: all-zero one-hot row over the full vocabulary
+             cond_df = features.fillna(0)
+         else:
+             cond_df = pd.DataFrame(cond, columns=['COND'])
+             cond_df['val'] = 1
+             cond_df = (cond_df.drop_duplicates()).pivot(columns='COND', values='val').reset_index(drop=True)
+             cond_df = cond_df.fillna(0)
+             oneh = cond_df.sum().to_frame().T
+             combined_df = pd.concat([features, oneh], ignore_index=True).fillna(0)
+             combined_oneh = combined_df.sum().to_frame().T
+             cond_df = combined_oneh
+
+     ########## PROC ##########
+     if feat_proc:
+         with open("./data/dict/" + task + "/procVocab", 'rb') as fp:
+             procDic = pickle.load(fp)
+
+         if proc:
+             feat = proc.keys()
+             proc_val = [proc[key] for key in feat]
+             procedures = pd.DataFrame(procDic, columns=['PROC'])
+             features = pd.DataFrame(np.zeros([1, len(procedures)]), columns=procedures['PROC'])
+             features.columns = pd.MultiIndex.from_product([["PROC"], features.columns])
+             procs = pd.DataFrame(columns=feat)
+             for p, v in zip(feat, proc_val):
+                 procs[p] = v
+             procs.columns = pd.MultiIndex.from_product([["PROC"], procs.columns])
+             proc_df = pd.concat([features, procs], ignore_index=True).fillna(0)
+         else:
+             procedures = pd.DataFrame(procDic, columns=['PROC'])
+             features = pd.DataFrame(np.zeros([1, len(procedures)]), columns=procedures['PROC'])
+             features.columns = pd.MultiIndex.from_product([["PROC"], features.columns])
+             proc_df = features.fillna(0)
+
+     ########## OUT ##########
+     if feat_out:
+         with open("./data/dict/" + task + "/outVocab", 'rb') as fp:
+             outDic = pickle.load(fp)
+
+         if out:
+             feat = out.keys()
+             out_val = [out[key] for key in feat]
+             outputs = pd.DataFrame(outDic, columns=['OUT'])
+             features = pd.DataFrame(np.zeros([1, len(outputs)]), columns=outputs['OUT'])
+             features.columns = pd.MultiIndex.from_product([["OUT"], features.columns])
+             outs = pd.DataFrame(columns=feat)
+             for o, v in zip(feat, out_val):
+                 outs[o] = v
+             outs.columns = pd.MultiIndex.from_product([["OUT"], outs.columns])
+             out_df = pd.concat([features, outs], ignore_index=True).fillna(0)
+         else:
+             outputs = pd.DataFrame(outDic, columns=['OUT'])
+             features = pd.DataFrame(np.zeros([1, len(outputs)]), columns=outputs['OUT'])
+             features.columns = pd.MultiIndex.from_product([["OUT"], features.columns])
+             out_df = features.fillna(0)
+
+     ########## CHART ##########
+     if feat_chart:
+         with open("./data/dict/" + task + "/chartVocab", 'rb') as fp:
+             chartDic = pickle.load(fp)
+
+         if chart:
+             charts = chart['val']
+             feat = charts.keys()
+             chart_val = [charts[key] for key in feat]
+             charts = pd.DataFrame(chartDic, columns=['CHART'])
+             features = pd.DataFrame(np.zeros([1, len(charts)]), columns=charts['CHART'])
+             features.columns = pd.MultiIndex.from_product([["CHART"], features.columns])
+
+             chart = pd.DataFrame(columns=feat)
+             for c, v in zip(feat, chart_val):
+                 chart[c] = v
+             chart.columns = pd.MultiIndex.from_product([["CHART"], chart.columns])
+             chart_df = pd.concat([features, chart], ignore_index=True).fillna(0)
+         else:
+             charts = pd.DataFrame(chartDic, columns=['CHART'])
+             features = pd.DataFrame(np.zeros([1, len(charts)]), columns=charts['CHART'])
+             features.columns = pd.MultiIndex.from_product([["CHART"], features.columns])
+             chart_df = features.fillna(0)
+
+     ########## LAB ##########
+     if feat_lab:
+         with open("./data/dict/" + task + "/labsVocab", 'rb') as fp:
+             chartDic = pickle.load(fp)
+
+         if chart:
+             charts = chart['val']
+             feat = charts.keys()
+             chart_val = [charts[key] for key in feat]
+             charts = pd.DataFrame(chartDic, columns=['LAB'])
+             features = pd.DataFrame(np.zeros([1, len(charts)]), columns=charts['LAB'])
+             features.columns = pd.MultiIndex.from_product([["LAB"], features.columns])
+
+             chart = pd.DataFrame(columns=feat)
+             for c, v in zip(feat, chart_val):
+                 chart[c] = v
+             chart.columns = pd.MultiIndex.from_product([["LAB"], chart.columns])
+             chart_df = pd.concat([features, chart], ignore_index=True).fillna(0)
+         else:
+             charts = pd.DataFrame(chartDic, columns=['LAB'])
+             features = pd.DataFrame(np.zeros([1, len(charts)]), columns=charts['LAB'])
+             features.columns = pd.MultiIndex.from_product([["LAB"], features.columns])
+             chart_df = features.fillna(0)
+
+     ########## MEDS ##########
+     if feat_meds:
+         with open("./data/dict/" + task + "/medVocab", 'rb') as fp:
+             medDic = pickle.load(fp)
+
+         if meds:
+             feat = meds['signal'].keys()
+             med_val = [meds['amount'][key] for key in feat]
+             meds = pd.DataFrame(medDic, columns=['MEDS'])
+             features = pd.DataFrame(np.zeros([1, len(meds)]), columns=meds['MEDS'])
+             features.columns = pd.MultiIndex.from_product([["MEDS"], features.columns])
+
+             med = pd.DataFrame(columns=feat)
+             for m, v in zip(feat, med_val):
+                 med[m] = v
+             med.columns = pd.MultiIndex.from_product([["MEDS"], med.columns])
+             meds_df = pd.concat([features, med], ignore_index=True).fillna(0)
+         else:
+             meds = pd.DataFrame(medDic, columns=['MEDS'])
+             features = pd.DataFrame(np.zeros([1, len(meds)]), columns=meds['MEDS'])
+             features.columns = pd.MultiIndex.from_product([["MEDS"], features.columns])
+             meds_df = features.fillna(0)
+
+     dyn_df = pd.concat([meds_df, proc_df, out_df, chart_df], axis=1)
+     return dyn_df, cond_df, demo
+
+
+ def generate_deep(data, task, feat_cond, feat_proc, feat_out, feat_chart, feat_meds, feat_lab):
+     stat_df = torch.zeros(size=(1, 0))
+     demo_df = torch.zeros(size=(1, 0))
+     meds = torch.zeros(size=(0, 0))
+     charts = torch.zeros(size=(0, 0))
+     proc = torch.zeros(size=(0, 0))
+     out = torch.zeros(size=(0, 0))
+     lab = torch.zeros(size=(0, 0))
+
+     size_cond, size_proc, size_meds, size_out, size_chart, size_lab, eth_vocab, gender_vocab, age_vocab, ins_vocab = vocab(task.replace(" ", "_"), feat_cond, feat_proc, feat_out, feat_chart, feat_meds, False)
+     dyn, cond_df, demo = concat_data(data, task.replace(" ", "_"), feat_cond, feat_proc, feat_out, feat_chart, feat_meds, feat_lab)
+     if feat_chart:
+         charts = dyn['CHART']
+         charts = charts.to_numpy()
+         charts = torch.tensor(charts, dtype=torch.long)
+         charts = charts.tolist()
+
+     if feat_meds:
+         meds = dyn['MEDS']
+         meds = meds.to_numpy()
+         meds = torch.tensor(meds, dtype=torch.long)
+         meds = meds.tolist()
+
+     if feat_proc:
+         proc = dyn['PROC']
+         proc = proc.to_numpy()
+         proc = torch.tensor(proc, dtype=torch.long)
+         proc = proc.tolist()
+
+     if feat_out:
+         out = dyn['OUT']
+         out = out.to_numpy()
+         out = torch.tensor(out, dtype=torch.long)
+         out = out.tolist()
+
+     if feat_lab:
+         lab = dyn['LAB']
+         lab = lab.to_numpy()
+         lab = torch.tensor(lab, dtype=torch.long)
+         lab = lab.tolist()
+
+     stat = cond_df
+     stat = stat.to_numpy()
+     stat = torch.tensor(stat)
+     if stat_df[0].nelement():
+         stat_df = torch.cat((stat_df, stat), 0)
+     else:
+         stat_df = stat
+
+     y = int(demo['label'].iloc[0])
+     demo["gender"] = demo["gender"].replace(gender_vocab)
+     demo["ethnicity"] = demo["ethnicity"].replace(eth_vocab)
+     demo["insurance"] = demo["insurance"].replace(ins_vocab)
+     demo["Age"] = demo["Age"].replace(age_vocab)
+     demo = demo[["gender", "ethnicity", "insurance", "Age"]]
+     demo = demo.values
+     demo = torch.tensor(demo)
+     if demo_df[0].nelement():
+         demo_df = torch.cat((demo_df, demo), 0)
+     else:
+         demo_df = demo
+     stat_df = stat_df.type(torch.LongTensor).squeeze()
+     demo_df = demo_df.type(torch.LongTensor).squeeze()
+
+     return stat_df, demo_df, meds, charts, out, proc, lab, y
+
+
+ def generate_ml(dyn, stat, demo, concat_cols, concat):
+     X_df = pd.DataFrame()
+     if concat:
+         # flatten the whole time x feature grid into a single wide row
+         dyna = dyn.copy()
+         dyna.columns = dyna.columns.droplevel(0)
+         dyna = dyna.to_numpy()
+         dyna = np.nan_to_num(dyna, copy=False)
+         dyna = dyna.reshape(1, -1)
+         dyn_df = pd.DataFrame(data=dyna, columns=concat_cols)
+     else:
+         # aggregate each feature over time: mean for CHART/MEDS, max for the rest
+         dyn_df = pd.DataFrame()
+         for key in dyn.columns.levels[0]:
+             dyn_temp = dyn[key]
+             if (key == "CHART") or (key == "MEDS"):
+                 agg = dyn_temp.aggregate("mean")
+                 agg = agg.reset_index()
+             else:
+                 agg = dyn_temp.aggregate("max")
+                 agg = agg.reset_index()
+
+             if dyn_df.empty:
+                 dyn_df = agg
+             else:
+                 dyn_df = pd.concat([dyn_df, agg], axis=0)
+         dyn_df = dyn_df.T
+         dyn_df.columns = dyn_df.iloc[0]
+         dyn_df = dyn_df.iloc[1:, :]
+
+     X_df = pd.concat([dyn_df, stat], axis=1)
+     X_df = pd.concat([X_df, demo], axis=1)
+     return X_df
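
To make the two encodings in generate_ml concrete, here is a small self-contained sketch on a toy MultiIndex frame; the column names are invented for illustration:

    import numpy as np
    import pandas as pd

    # toy dynamic frame: 3 time steps x 2 feature groups
    cols = pd.MultiIndex.from_tuples([("MEDS", "insulin"), ("PROC", "ventilation")])
    dyn = pd.DataFrame([[1.0, 0.0], [2.0, 1.0], [3.0, 0.0]], columns=cols)

    # 'concat' flattens time x features into one wide row: insulin_0, ventilation_0, ..., ventilation_2
    flat = dyn.copy()
    flat.columns = flat.columns.droplevel(0)
    concat_cols = [f"{c}_{t}" for t in range(dyn.shape[0]) for c in flat.columns]
    row = pd.DataFrame(flat.to_numpy().reshape(1, -1), columns=concat_cols)
    print(row)

    # 'aggreg' keeps one value per feature: mean for MEDS/CHART, max for the rest
    print(dyn["MEDS"].aggregate("mean"))  # insulin -> 2.0
    print(dyn["PROC"].aggregate("max"))   # ventilation -> 1.0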
task_cohort.py ADDED
@@ -0,0 +1,173 @@
+ import os
+ import sys
+ import time
+
+ import yaml
+
+ from .check_config import check_config
+ from .day_intervals_cohort_v22 import *
+ from .data_generation_icu_modify import *
+ from .data_generation_modify import *
+
+
+ def task_cohort(task, mimic_path, config_path):
+     sys.path.append('./preprocessing/day_intervals_preproc')
+     sys.path.append('./utils')
+     sys.path.append('./preprocessing/hosp_module_preproc')
+     sys.path.append('./model')
+     import day_intervals_cohort
+     import feature_selection_icu
+     import feature_selection_hosp
+
+     root_dir = os.path.dirname(os.path.abspath('UserInterface.ipynb'))
+     config_path = './config/' + config_path
+     with open(config_path) as f:
+         config = yaml.safe_load(f)
+     version_path = mimic_path + '/'
+     print(version_path)
+     version = mimic_path.split('/')[-1][0]
+     start = time.time()
+     # ---------------------------------------- config ----------------------------------------
+     label, tim, disease_label, predW = check_config(task, config_path)
+     icu_no_icu = config['icu_no_icu']
+     timeW = config['timeWindow']
+     include = int(timeW.split()[1])
+     bucket = config['timebucket']
+     radimp = config['radimp']
+
+     diag_flag = config['diagnosis']
+     proc_flag = config['proc']
+     med_flag = config['meds']
+     disease_filter = config['disease_filter']
+     groupingDiag = config['groupingDiag']
+     select_diag = config['select_diag']
+     select_med = config['select_med']
+     select_proc = config['select_proc']
+
+     if icu_no_icu == 'ICU':
+         out_flag = config['output']
+         chart_flag = config['chart']
+         select_out = config['select_out']
+         select_chart = config['select_chart']
+         lab_flag = False
+         select_lab = False
+     else:
+         lab_flag = config['lab']
+         groupingMed = config['groupingMed']
+         groupingProc = config['groupingProc']
+         select_lab = config['select_lab']
+         out_flag = False
+         chart_flag = False
+         select_out = False
+         select_chart = False
+
+     # -----------------------------------------------------------------------------------------
+
+     data_icu = icu_no_icu == "ICU"
+     data_mort = label == "Mortality"
+     data_admn = label == 'Readmission'
+     data_los = label == 'Length of Stay'
+
+     if disease_filter == "Heart Failure":
+         icd_code = 'I50'
+     elif disease_filter == "CKD":
+         icd_code = 'N18'
+     elif disease_filter == "COPD":
+         icd_code = 'J44'
+     elif disease_filter == "CAD":
+         icd_code = 'I25'
+     else:
+         icd_code = 'No Disease Filter'
+
+     # ---------------------------------------- EXTRACT MIMIC ----------------------------------------
+     if version == '2':
+         cohort_output = extract_data(icu_no_icu, label, tim, icd_code, root_dir, version_path, disease_label)
+     elif version == '1':
+         cohort_output = day_intervals_cohort.extract_data(icu_no_icu, label, tim, icd_code, root_dir, version_path, disease_label)
+     # ---------------------------------------- FEATURES ----------------------------------------
+     if data_icu:
+         feature_selection_icu.feature_icu(cohort_output, version_path, diag_flag, out_flag, chart_flag, proc_flag, med_flag)
+     else:
+         feature_selection_hosp.feature_nonicu(cohort_output, version_path, diag_flag, lab_flag, proc_flag, med_flag)
+     # ---------------------------------------- GROUPING ----------------------------------------
+     if data_icu:
+         if diag_flag:
+             group_diag = groupingDiag
+             feature_selection_icu.preprocess_features_icu(cohort_output, diag_flag, group_diag, False, False, False, 0, 0)
+     else:
+         # defaults so the call below is safe even when a flag is off
+         group_diag = group_med = group_proc = False
+         if diag_flag:
+             group_diag = groupingDiag
+         if med_flag:
+             group_med = groupingMed
+         if proc_flag:
+             group_proc = groupingProc
+         feature_selection_hosp.preprocess_features_hosp(cohort_output, diag_flag, proc_flag, med_flag, False, group_diag, group_med, group_proc, False, False, 0, 0)
+     # ---------------------------------------- SUMMARY ----------------------------------------
+     if data_icu:
+         feature_selection_icu.generate_summary_icu(diag_flag, proc_flag, med_flag, out_flag, chart_flag)
+     else:
+         feature_selection_hosp.generate_summary_hosp(diag_flag, proc_flag, med_flag, lab_flag)
+     # ---------------------------------------- FEATURE SELECTION ----------------------------------------
+     if data_icu:
+         if select_chart or select_out or select_diag or select_med or select_proc:
+             if select_chart:
+                 input('Please edit the list of codes in ./data/summary/chart_features.csv to select the chart items to keep, then press enter to continue')
+             if select_out:
+                 input('Please edit the list of codes in ./data/summary/out_features.csv to select the output items to keep, then press enter to continue')
+             if select_diag:
+                 input('Please edit the list of codes in ./data/summary/diag_features.csv to select the diagnosis ids to keep, then press enter to continue')
+             if select_med:
+                 input('Please edit the list of codes in ./data/summary/med_features.csv to select the meds items to keep, then press enter to continue')
+             if select_proc:
+                 input('Please edit the list of codes in ./data/summary/proc_features.csv to select the procedure ids to keep, then press enter to continue')
+             feature_selection_icu.features_selection_icu(cohort_output, diag_flag, proc_flag, med_flag, out_flag, chart_flag, select_diag, select_med, select_proc, select_out, select_chart)
+     else:
+         if select_diag or select_med or select_proc or select_lab:
+             if select_diag:
+                 input('Please edit the list of codes in ./data/summary/diag_features.csv to select the diagnosis ids to keep, then press enter to continue')
+             if select_med:
+                 input('Please edit the list of codes in ./data/summary/med_features.csv to select the meds items to keep, then press enter to continue')
+             if select_proc:
+                 input('Please edit the list of codes in ./data/summary/proc_features.csv to select the procedure ids to keep, then press enter to continue')
+             if select_lab:
+                 input('Please edit the list of codes in ./data/summary/labs_features.csv to select the lab items to keep, then press enter to continue')
+             feature_selection_hosp.features_selection_hosp(cohort_output, diag_flag, proc_flag, med_flag, lab_flag, select_diag, select_med, select_proc, select_lab)
+
+     # ---------------------------------------- CLEANING OF FEATURES ----------------------------------------
+     thresh = 0
+     if data_icu:
+         if chart_flag:
+             outlier_removal = config['outlier_removal']
+             clean_chart = outlier_removal != 'No outlier detection'
+             impute_outlier_chart = outlier_removal == 'Impute Outlier (default:98)'
+             thresh = config['outlier']
+             left_thresh = config['left_outlier']
+             feature_selection_icu.preprocess_features_icu(cohort_output, False, False, chart_flag, clean_chart, impute_outlier_chart, thresh, left_thresh)
+     else:
+         if lab_flag:
+             outlier_removal = config['outlier_removal']
+             clean_chart = outlier_removal != 'No outlier detection'
+             impute_outlier_chart = outlier_removal == 'Impute Outlier (default:98)'
+             thresh = config['outlier']
+             left_thresh = config['left_outlier']
+             feature_selection_hosp.preprocess_features_hosp(cohort_output, False, False, False, lab_flag, False, False, False, clean_chart, impute_outlier_chart, thresh, left_thresh)
+     # ---------------------------------------- Time-series representation ----------------------------------------
+     if radimp == 'forward fill and mean':
+         impute = 'Mean'
+     elif radimp == 'forward fill and median':
+         impute = 'Median'
+     else:
+         impute = False
+
+     if data_icu:
+         gen = Generator(task, cohort_output, data_mort, data_admn, data_los, diag_flag, proc_flag, out_flag, chart_flag, med_flag, impute, include, bucket, predW)
+     else:
+         gen = Generator(cohort_output, data_mort, data_admn, data_los, diag_flag, lab_flag, proc_flag, med_flag, impute, include, bucket, predW)
+
+     end = time.time()
+     print("Time elapsed : ", round((end - start) / 60, 2), "mins")
+     print("[============TASK COHORT SUCCESSFULLY CREATED============]")