thbndi committed
Commit 3368b82
1 Parent(s): 5eb1005

Update Mimic4Dataset.py

Files changed (1)
  1. Mimic4Dataset.py  +43 -37
Mimic4Dataset.py CHANGED
@@ -9,8 +9,8 @@ from urllib.request import urlretrieve
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import LabelEncoder
 import yaml
-from .dataset_utils import create_vocab,gender_vocab,vocab, concat_data, generate_deep, generate_ml
-from .task_cohort import create_cohort, check_config_file
+from .dataset_utils import vocab, concat_data, generate_deep, generate_ml
+from .task_cohort import create_cohort
 
 
 
@@ -103,7 +103,11 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         version = self.mimic_path.split('/')[-1]
         mimic_folder= self.mimic_path.split('/')[-2]
         mimic_complete_path='/'+mimic_folder+'/'+version
-
+        print('mimic_complete_path : ',mimic_complete_path)
+        print('mimic_folder : ',mimic_folder)
+        print('self.mimic_path : ',self.mimic_path)
+        print('version : ',version)
+
         current_directory = os.getcwd()
         if os.path.exists(os.path.dirname(current_directory)+'/MIMIC-IV-Data-Pipeline-main'):
             dir =os.path.dirname(current_directory)
@@ -111,6 +115,7 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         else:
             #move to parent directory of mimic data
             dir = self.mimic_path.replace(mimic_complete_path,'')
+            print('dir : ',dir)
         if dir[-1]!='/':
             dir=dir+'/'
         elif dir=='':
@@ -138,9 +143,9 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             file_path, head = urlretrieve(self.config_path,c)
         else :
             file_path = self.config_path
-
         if not os.path.exists('./config'):
             os.makedirs('config')
+
         #save config file in config folder
         self.conf='./config/'+file_path.split('/')[-1]
         if not os.path.exists(self.conf):
@@ -211,7 +216,26 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             pickle.dump(test_dic, f)
 
         return dict_dir
-
+
+    def verif_dim_tensor(self, proc, out, chart, meds, lab):
+        verif=True
+        if self.feat_proc:
+            if (len(proc)<(self.timeW//self.bucket)):
+                verif=False
+        if self.feat_out:
+            if (len(out)<(self.timeW//self.bucket)):
+                verif=False
+        if self.feat_chart:
+            if (len(chart)<(self.timeW//self.bucket)):
+                verif=False
+        if self.feat_meds:
+            if (len(meds)<(self.timeW//self.bucket)):
+                verif=False
+        if self.feat_lab:
+            if (len(lab)<(self.timeW//self.bucket)):
+                verif=False
+        return verif
+
     ###########################################################RAW##################################################################
 
     def _info_raw(self):
@@ -407,11 +431,9 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             dico = pickle.load(fp)
 
         df = pd.DataFrame.from_dict(dico, orient='index')
-        task=self.config.name.replace(" ","_")
-
         for i, data in df.iterrows():
             concat_cols=[]
-            dyn_df,cond_df,demo=concat_data(data,task,self.feat_cond,self.feat_proc,self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab)
+            dyn_df,cond_df,demo=concat_data(data,self.config.name.replace(" ","_"),self.feat_cond,self.feat_proc,self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab)
             dyn=dyn_df.copy()
             dyn.columns=dyn.columns.droplevel(0)
             cols=dyn.columns
@@ -426,14 +448,16 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             demo=demo.drop(['label'],axis=1)
             X= generate_ml(dyn_df,cond_df,demo,concat_cols,self.concat)
             X=X.values.tolist()[0]
+
             size_concat = self.size_cond+ self.size_proc * ((self.timeW//self.bucket)+1) + self.size_meds* ((self.timeW//self.bucket)+1)+ self.size_out* ((self.timeW//self.bucket)+1)+ self.size_chart* ((self.timeW//self.bucket)+1)+ self.size_lab* ((self.timeW//self.bucket)+1) + 4
             size_aggreg = self.size_cond+ self.size_proc + self.size_meds+ self.size_out+ self.size_chart+ self.size_lab + 4
-            print(len(X),size_concat,(self.timeW//self.bucket))
+
             if (len(X)==size_concat or len(X)==size_aggreg):
                 yield int(i), {
                     "label": label,
                     "features": X,
                 }
+
     ######################################################DEEP###############################################################
     def _info_deep(self):
         features = datasets.Features(
@@ -459,27 +483,11 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
     def _generate_examples_deep(self, filepath):
         with open(filepath, 'rb') as fp:
            dico = pickle.load(fp)
-        task=self.config.name.replace(" ","_")
+
         for key, data in dico.items():
-            stat, demo, meds, chart, out, proc, lab, y = generate_deep(data, task, self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab)
-
-            verri=True
-            if self.feat_proc:
-                if (len(proc)<(self.timeW//self.bucket)):
-                    verri=False
-            if self.feat_out:
-                if (len(out)<(self.timeW//self.bucket)):
-                    verri=False
-            if self.feat_chart:
-                if (len(chart)<(self.timeW//self.bucket)):
-                    verri=False
-            if self.feat_meds:
-                if (len(meds)<(self.timeW//self.bucket)):
-                    verri=False
-            if self.feat_lab:
-                if (len(lab)<(self.timeW//self.bucket)):
-                    verri=False
-            if verri:
+            stat, demo, meds, chart, out, proc, lab, y = generate_deep(data, self.config.name.replace(" ","_"), self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab)
+
+            if self.verif_dim_tensor(proc, out, chart, meds, lab):
                 if self.data_icu:
                     yield int(key), {
                         'label': y,
@@ -505,7 +513,6 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
     def _info(self):
         self.path = self.init_cohort()
         self.size_cond, self.size_proc, self.size_meds, self.size_out, self.size_chart, self.size_lab, eth_vocab,gender_vocab,age_vocab,ins_vocab=vocab(self.config.name.replace(" ","_"),self.feat_cond,self.feat_proc,self.feat_out,self.feat_chart,self.feat_meds,self.feat_lab)
-
         if (self.encoding == 'concat' or self.encoding =='aggreg'):
             return self._info_encoded()
 
@@ -517,21 +524,20 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
 
 
     def _split_generators(self, dl_manager):
-        csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
+        data_dir = "./data/dict/"+self.config.name.replace(" ","_")
         if self.val_size > 0 :
             return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}),
-                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.pkl'}),
-                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}),
+                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/train_data.pkl'}),
+                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/val_data.pkl'}),
+                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/test_data.pkl'}),
             ]
         else :
             return [
-                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}),
-                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}),
+                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/train_data.pkl'}),
+                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/test_data.pkl'}),
             ]
 
     def _generate_examples(self, filepath):
-
         if (self.encoding == 'concat' or self.encoding == 'aggreg'):
             yield from self._generate_examples_encoded(filepath)
 
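The new verif_dim_tensor helper centralizes the per-modality length check that _generate_examples_deep previously carried inline as the verri flag: a sample is yielded only when every enabled feature sequence covers the full observation window of timeW // bucket steps. A condensed, functionally equivalent sketch; the standalone function and the toy values are illustrative, not part of the commit:

def verif_dim_tensor(feats, min_len):
    # feats: (enabled, sequence) pairs for proc, out, chart, meds, lab;
    # a sample passes only if every enabled sequence has at least min_len steps.
    return all(len(seq) >= min_len for enabled, seq in feats if enabled)

# Toy check: a 48 h window with 1 h buckets requires 48 steps per modality.
feats = [(True, [0] * 48), (False, []), (True, [0] * 48)]
print(verif_dim_tensor(feats, 48 // 1))  # True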
 
 
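For context on the renamed data_dir variable: _split_generators resolves the split pickles under ./data/dict/<config name>/ and exposes a validation split only when val_size > 0. A minimal loading sketch, assuming the script is consumed as a Hugging Face dataset builder; the repo id, task config name, mimic_path, and val_size below are illustrative assumptions, not values taken from the commit:

from datasets import load_dataset

ds = load_dataset(
    "thbndi/Mimic4Dataset",          # dataset script location (assumed)
    "Mortality",                     # task/config name (assumed)
    mimic_path="/data/mimiciv/2.2",  # root of a local MIMIC-IV copy (assumed)
    val_size=0.1,                    # > 0, so train/validation/test are all generated
)
print(ds)  # a DatasetDict with train, validation, and test splits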