Update Mimic4Dataset.py

Mimic4Dataset.py (CHANGED): +23 -24
@@ -13,6 +13,13 @@ import numpy as np
 from .dataset_utils import vocab, concat_data, generate_deep, generate_ml, generate_text
 from .task_cohort import create_cohort
 
+################################################################################
+################################################################################
+##                                                                            ##
+##                     MIMIC IV DATASET GENERATION SCRIPT                     ##
+##                                                                            ##
+################################################################################
+################################################################################
 
 
 _DESCRIPTION = """\
@@ -21,7 +28,7 @@ Available tasks are: Mortality, Length of Stay, Readmission, Phenotype.
 The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
 mimic path should have this form : "path/to/mimic4data/from/username/mimiciv/2.2"
 If you choose a Custom task provide a configuration file for the Time series.
-Currently working with Mimic-IV
+Currently working with Mimic-IV ICU Data.
 """
 _BASE_URL = "https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main"
 _HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
@@ -29,7 +36,6 @@ _HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
 _CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
 _GIT_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline"
 
-#_ICD_CODE = f"{_BASE_URL}/icd10.txt"
 _DATA_GEN = f"{_BASE_URL}/data_generation_icu_modify.py"
 _DATA_GEN_HOSP= f"{_BASE_URL}/data_generation_modify.py"
 _DAY_INT= f"{_BASE_URL}/day_intervals_cohort_v22.py"
@@ -61,7 +67,6 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         self.test_size = kwargs.pop("test_size",0.2)
         self.val_size = kwargs.pop("val_size",0.1)
         self.generate_cohort = kwargs.pop("generate_cohort",True)
-        self.param = kwargs.pop("param",0)
 
         if self.encoding == 'concat':
             self.concat = True
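As a usage note, a minimal sketch of how these builder keyword arguments might be supplied through `datasets.load_dataset`. Only `test_size`, `val_size`, `generate_cohort` and the 'concat' encoding value are confirmed by this hunk; the config name "Mortality", the `mimic_path` argument and the `encoding` keyword spelling are assumptions for illustration, not part of the commit.

from datasets import load_dataset

# Hedged sketch, not part of the commit: extra kwargs are forwarded to the builder,
# which pops test_size / val_size / generate_cohort as shown in the hunk above.
# "Mortality" and mimic_path are illustrative assumptions.
dataset = load_dataset(
    "thbndi/Mimic4Dataset",
    "Mortality",
    mimic_path="path/to/mimic4data/from/username/mimiciv/2.2",
    encoding="concat",
    test_size=0.2,
    val_size=0.1,
    generate_cohort=True,
)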
@@ -152,7 +157,7 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         with open(self.conf) as f:
             config = yaml.safe_load(f)
 
-
+        #get config parameters for time series and features
         timeW = config['timeWindow']
         self.timeW=int(timeW.split()[1])
         self.bucket = config['timebucket']
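The new comment documents the custom-task configuration parsing: `timeWindow` is expected to be a two-token string whose second token is numeric, and `timebucket` is read directly. A small self-contained sketch of that parsing follows; the example values ('Last 72', 2) are illustrative guesses, since the actual configuration file is not part of this diff.

import yaml

# Illustrative config for a Custom task; only the two keys read above are shown,
# and the values are assumptions, not taken from the repository.
example_conf = """
timeWindow: Last 72
timebucket: 2
"""

config = yaml.safe_load(example_conf)
time_window = int(config['timeWindow'].split()[1])   # -> 72, mirrors self.timeW
bucket = config['timebucket']                        # -> 2, mirrors self.bucket
print(time_window, bucket)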
@@ -215,7 +220,7 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             pickle.dump(test_dic, f)
         return dict_dir
 
-
+    #verify if the dimension of the tensors corresponds to the time window
    def verif_dim_tensor(self, proc, out, chart, meds, lab,interv):
        verif=True
        if self.feat_proc:
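The added comment says `verif_dim_tensor` checks that each modality tensor matches the configured time window. Its body is not shown in this diff, so the following is only a hedged sketch of that kind of check, assuming every enabled feature (and the intervention mask) should span timeW // bucket time steps; the real method may differ.

# Hedged sketch only: the actual verif_dim_tensor body is not part of this hunk.
def verif_dim_tensor_sketch(proc, out, chart, meds, lab, interv,
                            time_w, bucket,
                            feat_proc=True, feat_out=True, feat_chart=True,
                            feat_meds=True, feat_lab=True):
    expected = time_w // bucket          # assumed number of time buckets
    modalities = [(feat_proc, proc), (feat_out, out), (feat_chart, chart),
                  (feat_meds, meds), (feat_lab, lab)]
    checks = [len(t) == expected for enabled, t in modalities if enabled]
    checks.append(len(interv) == expected)
    return all(checks)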
@@ -433,15 +438,6 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
         feat_tocsv=True
         for i, data in df.iterrows():
             dyn_df,cond_df,demo=concat_data(data,self.interval,self.feat_cond,self.feat_proc,self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab,self.condDict, self.procDict, self.outDict, self.chartDict, self.medDict)
-            if feat_tocsv:
-                #save the features of the vector for analysis purposes if needed
-                feats = list(dyn_df.columns.droplevel(0))
-                feats.extend(list(cond_df.columns))
-                feats.extend(list(demo.columns))
-                df_feats = pd.DataFrame(columns=feats)
-                path = './data/dict/'+self.config.name.replace(" ","_")+'/features_'+self.encoding+'.csv'
-                df_feats.to_csv(path)
-                feat_tocsv=False
             dyn=dyn_df.copy()
             dyn.columns=dyn.columns.droplevel(0)
             concat_cols = [f"{col}_{t}" for t in range(dyn.shape[0]) for col in dyn.columns]
@@ -450,6 +446,18 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
             demo['insurance']=ins_encoder.transform(demo['insurance'])
             label = data['label']
             demo=demo.drop(['label'],axis=1)
+            if feat_tocsv:
+                #save the features of the vector for analysis purposes if needed
+                if self.encoding == 'concat':
+                    feats = concat_cols
+                else:
+                    feats = list(dyn_df.columns.droplevel(0))
+                feats.extend(list(cond_df.columns))
+                feats.extend(list(demo.columns))
+                df_feats = pd.DataFrame(columns=feats)
+                path = './data/dict/'+self.config.name.replace(" ","_")+'/features_'+self.encoding+'.csv'
+                df_feats.to_csv(path)
+                feat_tocsv=False
             X= generate_ml(dyn_df,cond_df,demo,concat_cols,self.concat)
             X=X.values[0]
 
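The relocated block now derives the saved feature names from `concat_cols` when `encoding == 'concat'`, i.e. one name per (column, time step) pair of the flattened time series, instead of the raw multi-index column names. A toy, self-contained illustration of that naming scheme follows; the column names and the 3-step window are made up for the example.

import pandas as pd

# Toy time-bucketed frame: 3 time steps x 2 made-up chart features.
dyn = pd.DataFrame({'hr': [80, 82, 85], 'spo2': [97, 98, 96]})

# Same flattening as in the loop above: one name per (column, time step) pair.
concat_cols = [f"{col}_{t}" for t in range(dyn.shape[0]) for col in dyn.columns]
print(concat_cols)   # ['hr_0', 'spo2_0', 'hr_1', 'spo2_1', 'hr_2', 'spo2_2']

# These names (plus condition and demographic columns) are what the block above
# writes once, as a header-only CSV, to features_<encoding>.csv for analysis.
pd.DataFrame(columns=concat_cols).to_csv('features_concat_example.csv')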
@@ -535,16 +543,7 @@ class Mimic4Dataset(datasets.GeneratorBasedBuilder):
 
             for key, data in dico.items():
                 cond_text,chart_text,meds_text,proc_text,out_text = generate_text(data,icd,items, self.feat_cond, self.feat_chart, self.feat_meds, self.feat_proc, self.feat_out)
-
-                text= cond_text+chart_text+meds_text+proc_text+out_text
-                elif self.param==2:
-                    text= cond_text
-                elif self.param==3:
-                    text=cond_text+ chart_text
-                elif self.param==4:
-                    text=cond_text+ chart_text+meds_text
-                elif self.param==5:
-                    text=cond_text+ chart_text+meds_text+proc_text
+                text= cond_text+chart_text+meds_text+proc_text+out_text
                 yield int(key),{
                     'label' : data['label'],
                     'text': text
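With `self.param` removed earlier in this commit, the text encoding now always yields the full concatenation of condition, chart, medication, procedure and output text. A hedged sketch of consuming those examples, assuming the dataset was loaded with a text-style config as in the earlier usage sketch and exposes a 'train' split.

# Hedged sketch: the split name and loading are assumptions; the 'label' and
# 'text' fields come from the yield statement shown above.
for example in dataset['train']:
    label = example['label']   # cohort label
    text = example['text']     # cond + chart + meds + proc + out text, concatenated
    print(label, text[:120])
    break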