import os
import pandas as pd
import datasets
import sys
import pickle
import subprocess
import shutil
from urllib.request import urlretrieve
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
from tqdm import tqdm
import yaml
import time
import torch
_DESCRIPTION = """\
Dataset for MIMIC-IV data, by default for the Mortality task.
Available tasks are: Mortality, Length of Stay, Readmission, Phenotype.
The data is extracted from the MIMIC-IV database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
mimic_path should have this form: "path/to/mimic4data/from/username/mimiciv/2.2"
If you choose a Custom task, provide a configuration file for the time series.
"""
_HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
_CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline"
_DATA_GEN = 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/data_generation_icu_modify.py'
_DATA_GEN_HOSP= 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/data_generation_modify.py'
_DAY_INT= 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/day_intervals_cohort_v22.py'
_CONFIG_URLS = {'los' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/los.config',
'mortality' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/mortality.config',
'phenotype' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/phenotype.config',
'readmission' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/readmission.config'
}
def check_config(task,config_file):
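    """Validate the YAML task configuration and return (label, time, disease_label, predW).

    Raises ValueError for an unknown task or, for Phenotype, an unknown disease label.
    """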
with open(config_file) as f:
config = yaml.safe_load(f)
if task=='Phenotype':
disease_label = config['disease_label']
else :
disease_label = ""
time = config['timePrediction']
label = task
timeW = config['timeWindow']
include=int(timeW.split()[1])
bucket = config['timebucket']
radimp = config['radimp']
predW = config['predW']
disease_filter = config['disease_filter']
icu_no_icu = config['icu_no_icu']
groupingDiag = config['groupingDiag']
    assert icu_no_icu in ['ICU','Non-ICU'], "Chosen data should be one of the following: ICU, Non-ICU"
data_icu = icu_no_icu=='ICU'
if data_icu:
chart_flag = config['chart']
output_flag = config['output']
select_chart = config['select_chart']
lab_flag = False
select_lab = False
else:
lab_flag =config['lab']
select_lab = config['select_lab']
groupingMed = config['groupingMed']
groupingProc = config['groupingProc']
chart_flag = False
output_flag = False
select_chart = False
diag_flag= config['diagnosis']
proc_flag = config['proc']
meds_flag = config['meds']
select_diag= config['select_diag']
select_med= config['select_med']
select_proc= config['select_proc']
select_out = config['select_out']
outlier_removal=config['outlier_removal']
thresh=config['outlier']
left_thresh=config['left_outlier']
if data_icu:
        assert isinstance(select_diag,bool) and isinstance(select_med,bool) and isinstance(select_proc,bool) and isinstance(select_out,bool) and isinstance(select_chart,bool), "select_diag, select_chart, select_med, select_proc, select_out should be boolean"
        assert isinstance(chart_flag,bool) and isinstance(output_flag,bool) and isinstance(diag_flag,bool) and isinstance(proc_flag,bool) and isinstance(meds_flag,bool), "chart_flag, output_flag, diag_flag, proc_flag, meds_flag should be boolean"
else:
        assert isinstance(select_diag,bool) and isinstance(select_med,bool) and isinstance(select_proc,bool) and isinstance(select_out,bool) and isinstance(select_lab,bool), "select_diag, select_lab, select_med, select_proc, select_out should be boolean"
        assert isinstance(lab_flag,bool) and isinstance(diag_flag,bool) and isinstance(proc_flag,bool) and isinstance(meds_flag,bool), "lab_flag, diag_flag, proc_flag, meds_flag should be boolean"
if task=='Phenotype':
if disease_label=='Heart Failure':
label='Readmission'
time=30
disease_label='I50'
elif disease_label=='CAD':
label='Readmission'
time=30
disease_label='I25'
elif disease_label=='CKD':
label='Readmission'
time=30
disease_label='N18'
elif disease_label=='COPD':
label='Readmission'
time=30
disease_label='J44'
else :
raise ValueError('Disease label not correct provide one in the list: Heart Failure, CAD, CKD, COPD')
predW=0
        assert timeW.split()[0]=='Last' and 24<=include<=72, "Time window should be between Last 24 and Last 72"
elif task=='Mortality':
time=0
label= 'Mortality'
        assert 2<=predW<=8, "Prediction window should be between 2 and 8"
        assert timeW.split()[0]=='First' and 24<=include<=72, "Time window should be between First 24 and First 72"
elif task=='Length of Stay':
label= 'Length of Stay'
        assert timeW.split()[0]=='First' and 24<=include<=72, "Time window should be between First 24 and First 72"
        assert 1<=time<=10, "Length of stay should be between 1 and 10"
predW=0
elif task=='Readmission':
label= 'Readmission'
        assert timeW.split()[0]=='Last' and 24<=include<=72, "Time window should be between Last 24 and Last 72"
        assert 10<=time<=150 and time%10==0, "Readmission window should be between 10 and 150 with a step of 10"
predW=0
else:
raise ValueError('Task not correct')
    assert disease_filter in ['Heart Failure','COPD','CKD','CAD',""], "Disease filter should be one of the following: Heart Failure, COPD, CKD, CAD or empty"
    assert groupingDiag in ['Convert ICD-9 to ICD-10 and group ICD-10 codes','Keep both ICD-9 and ICD-10 codes','Convert ICD-9 to ICD-10 codes'], "Grouping ICD should be one of the following: Convert ICD-9 to ICD-10 and group ICD-10 codes, Keep both ICD-9 and ICD-10 codes, Convert ICD-9 to ICD-10 codes"
    assert isinstance(bucket, int) and 1<=bucket<=6, "Time bucket should be an integer between 1 and 6"
    assert radimp in ['No Imputation', 'forward fill and mean','forward fill and median'], "imputation should be one of the following: No Imputation, forward fill and mean, forward fill and median"
if chart_flag:
        assert isinstance(left_thresh, int) and 0<=left_thresh<=10, "Left outlier threshold should be an integer between 0 and 10"
        assert isinstance(thresh, int) and 90<=thresh<=99, "Outlier threshold should be an integer between 90 and 99"
        assert outlier_removal in ['No outlier detection','Impute Outlier (default:98)','Remove outliers (default:98)'], "Outlier removal should be one of the following: No outlier detection, Impute Outlier (default:98), Remove outliers (default:98)"
if lab_flag:
        assert isinstance(left_thresh, int) and 0<=left_thresh<=10, "Left outlier threshold should be an integer between 0 and 10"
        assert isinstance(thresh, int) and 90<=thresh<=99, "Outlier threshold should be an integer between 90 and 99"
        assert outlier_removal in ['No outlier detection','Impute Outlier (default:98)','Remove outliers (default:98)'], "Outlier removal should be one of the following: No outlier detection, Impute Outlier (default:98), Remove outliers (default:98)"
        assert groupingProc in ['ICD-9 and ICD-10','ICD-10'], "Grouping procedure should be one of the following: ICD-9 and ICD-10, ICD-10"
        assert groupingMed in ['Yes','No'], "Grouping medication codes to non-proprietary names should be one of the following: Yes, No"
return label, time, disease_label, predW
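# A sketch of the YAML keys check_config() reads (values illustrative, not defaults):
#
#   disease_label: ""                  # Phenotype only: Heart Failure | CAD | CKD | COPD
#   timePrediction: 30
#   timeWindow: "First 72"             # or "Last 72"
#   timebucket: 2
#   radimp: "No Imputation"            # or "forward fill and mean" / "forward fill and median"
#   predW: 2                           # Mortality only
#   disease_filter: ""
#   icu_no_icu: "ICU"                  # or "Non-ICU"
#   groupingDiag: "Convert ICD-9 to ICD-10 and group ICD-10 codes"
#   diagnosis: true                    # feature flags: proc, meds, chart/output (ICU) or lab (non-ICU)
#   outlier_removal: "No outlier detection"
#   outlier: 98
#   left_outlier: 0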
def create_vocab(file,task):
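    """Load the pickled vocabulary `file` for `task` and map each entry to a 1-based index (index 0 is reserved)."""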
with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
condVocab = pickle.load(fp)
condVocabDict={}
condVocabDict[0]=0
for val in range(len(condVocab)):
condVocabDict[condVocab[val]]= val+1
return condVocabDict
def gender_vocab():
genderVocabDict={}
genderVocabDict['<PAD>']=0
genderVocabDict['M']=1
genderVocabDict['F']=2
return genderVocabDict
def vocab(task,diag_flag,proc_flag,out_flag,chart_flag,med_flag,lab_flag):
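    """Load all per-task vocabularies, persist the demographic ones under ./data/dict/<task>/, and return the feature vocabulary sizes plus the demographic lookup dicts."""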
condVocabDict={}
procVocabDict={}
medVocabDict={}
outVocabDict={}
chartVocabDict={}
labVocabDict={}
ethVocabDict={}
ageVocabDict={}
genderVocabDict={}
insVocabDict={}
ethVocabDict=create_vocab('ethVocab',task)
with open('./data/dict/'+task+'/ethVocabDict', 'wb') as fp:
pickle.dump(ethVocabDict, fp)
ageVocabDict=create_vocab('ageVocab',task)
with open('./data/dict/'+task+'/ageVocabDict', 'wb') as fp:
pickle.dump(ageVocabDict, fp)
genderVocabDict=gender_vocab()
with open('./data/dict/'+task+'/genderVocabDict', 'wb') as fp:
pickle.dump(genderVocabDict, fp)
insVocabDict=create_vocab('insVocab',task)
with open('./data/dict/'+task+'/insVocabDict', 'wb') as fp:
pickle.dump(insVocabDict, fp)
if diag_flag:
file='condVocab'
with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
condVocabDict = pickle.load(fp)
if proc_flag:
file='procVocab'
with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
procVocabDict = pickle.load(fp)
if med_flag:
file='medVocab'
with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
medVocabDict = pickle.load(fp)
if out_flag:
file='outVocab'
with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
outVocabDict = pickle.load(fp)
if chart_flag:
file='chartVocab'
with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
chartVocabDict = pickle.load(fp)
if lab_flag:
file='labsVocab'
with open ('./data/dict/'+task+'/'+file, 'rb') as fp:
labVocabDict = pickle.load(fp)
return len(condVocabDict),len(procVocabDict),len(medVocabDict),len(outVocabDict),len(chartVocabDict),len(labVocabDict),ethVocabDict,genderVocabDict,ageVocabDict,insVocabDict
def concat_data(data,task,feat_cond,feat_proc,feat_out,feat_chart,feat_meds,feat_lab):
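    """Assemble one admission into (dyn_df, cond_df, demo): a time-bucket x feature frame with ('MEDS'|'PROC'|'OUT'|'CHART'/'LAB', code) MultiIndex columns, a one-hot condition row, and a one-row demographics frame."""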
meds=data['Med']
proc = data['Proc']
out = data['Out']
chart = data['Chart']
cond= data['Cond']['fids']
cond_df=pd.DataFrame()
proc_df=pd.DataFrame()
out_df=pd.DataFrame()
chart_df=pd.DataFrame()
meds_df=pd.DataFrame()
#demographic
    #demographic (DataFrame.append was removed in pandas 2.0, so build the one-row frame directly)
    demo = pd.DataFrame([{'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']}],
                        columns=['Age','gender','ethnicity','label','insurance'])
##########COND#########
if (feat_cond):
#get all conds
with open("./data/dict/"+task+"/condVocab", 'rb') as fp:
conDict = pickle.load(fp)
conds=pd.DataFrame(conDict,columns=['COND'])
features=pd.DataFrame(np.zeros([1,len(conds)]),columns=conds['COND'])
#onehot encode
        if cond == []:
            #no recorded conditions: keep the all-zero one-hot row over the vocabulary
            cond_df = features.fillna(0)
else:
cond_df=pd.DataFrame(cond,columns=['COND'])
cond_df['val']=1
cond_df=(cond_df.drop_duplicates()).pivot(columns='COND',values='val').reset_index(drop=True)
cond_df=cond_df.fillna(0)
oneh = cond_df.sum().to_frame().T
combined_df = pd.concat([features,oneh],ignore_index=True).fillna(0)
combined_oneh=combined_df.sum().to_frame().T
cond_df=combined_oneh
##########PROC#########
if (feat_proc):
with open("./data/dict/"+task+"/procVocab", 'rb') as fp:
procDic = pickle.load(fp)
if proc :
feat=proc.keys()
proc_val=[proc[key] for key in feat]
procedures=pd.DataFrame(procDic,columns=['PROC'])
features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC'])
features.columns=pd.MultiIndex.from_product([["PROC"], features.columns])
procs=pd.DataFrame(columns=feat)
for p,v in zip(feat,proc_val):
procs[p]=v
procs.columns=pd.MultiIndex.from_product([["PROC"], procs.columns])
proc_df = pd.concat([features,procs],ignore_index=True).fillna(0)
else:
procedures=pd.DataFrame(procDic,columns=['PROC'])
features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC'])
features.columns=pd.MultiIndex.from_product([["PROC"], features.columns])
proc_df=features.fillna(0)
##########OUT#########
if (feat_out):
with open("./data/dict/"+task+"/outVocab", 'rb') as fp:
outDic = pickle.load(fp)
if out :
feat=out.keys()
out_val=[out[key] for key in feat]
outputs=pd.DataFrame(outDic,columns=['OUT'])
features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT'])
features.columns=pd.MultiIndex.from_product([["OUT"], features.columns])
outs=pd.DataFrame(columns=feat)
for o,v in zip(feat,out_val):
outs[o]=v
outs.columns=pd.MultiIndex.from_product([["OUT"], outs.columns])
out_df = pd.concat([features,outs],ignore_index=True).fillna(0)
else:
outputs=pd.DataFrame(outDic,columns=['OUT'])
features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT'])
features.columns=pd.MultiIndex.from_product([["OUT"], features.columns])
out_df=features.fillna(0)
##########CHART#########
if (feat_chart):
with open("./data/dict/"+task+"/chartVocab", 'rb') as fp:
chartDic = pickle.load(fp)
if chart:
charts=chart['val']
feat=charts.keys()
chart_val=[charts[key] for key in feat]
charts=pd.DataFrame(chartDic,columns=['CHART'])
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART'])
features.columns=pd.MultiIndex.from_product([["CHART"], features.columns])
chart=pd.DataFrame(columns=feat)
for c,v in zip(feat,chart_val):
chart[c]=v
chart.columns=pd.MultiIndex.from_product([["CHART"], chart.columns])
chart_df = pd.concat([features,chart],ignore_index=True).fillna(0)
else:
charts=pd.DataFrame(chartDic,columns=['CHART'])
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART'])
features.columns=pd.MultiIndex.from_product([["CHART"], features.columns])
chart_df=features.fillna(0)
##########LAB#########
if (feat_lab):
with open("./data/dict/"+task+"/labsVocab", 'rb') as fp:
chartDic = pickle.load(fp)
if chart:
charts=chart['val']
feat=charts.keys()
chart_val=[charts[key] for key in feat]
charts=pd.DataFrame(chartDic,columns=['LAB'])
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['LAB'])
features.columns=pd.MultiIndex.from_product([["LAB"], features.columns])
chart=pd.DataFrame(columns=feat)
for c,v in zip(feat,chart_val):
chart[c]=v
chart.columns=pd.MultiIndex.from_product([["LAB"], chart.columns])
chart_df = pd.concat([features,chart],ignore_index=True).fillna(0)
else:
charts=pd.DataFrame(chartDic,columns=['LAB'])
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['LAB'])
features.columns=pd.MultiIndex.from_product([["LAB"], features.columns])
chart_df=features.fillna(0)
###MEDS
if (feat_meds):
with open("./data/dict/"+task+"/medVocab", 'rb') as fp:
medDic = pickle.load(fp)
if meds:
feat=meds['signal'].keys()
med_val=[meds['amount'][key] for key in feat]
meds=pd.DataFrame(medDic,columns=['MEDS'])
features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS'])
features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns])
med=pd.DataFrame(columns=feat)
for m,v in zip(feat,med_val):
med[m]=v
med.columns=pd.MultiIndex.from_product([["MEDS"], med.columns])
meds_df = pd.concat([features,med],ignore_index=True).fillna(0)
else:
meds=pd.DataFrame(medDic,columns=['MEDS'])
features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS'])
features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns])
meds_df=features.fillna(0)
dyn_df = pd.concat([meds_df,proc_df,out_df,chart_df], axis=1)
return dyn_df,cond_df,demo
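# Shapes returned by concat_data() for one admission (illustrative):
#   dyn_df  -- time buckets x features, columns MultiIndexed as ('MEDS'|'PROC'|'OUT'|'CHART'/'LAB', code)
#   cond_df -- a single one-hot row over the condition vocabulary
#   demo    -- a single row with Age, gender, ethnicity, label, insurance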
def getXY_deep(data,task,feat_cond,feat_proc,feat_out,feat_chart,feat_meds,feat_lab):
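    """Convert one admission into the tensors used by the 'tensor' encoding: condition one-hots, encoded demographics, per-modality time series, and the label."""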
    # per-modality containers; empty tensors stand in for disabled features
    stat_df = torch.zeros(size=(1,0))
    demo_df = torch.zeros(size=(1,0))
    meds = torch.zeros(size=(0,0))
    charts = torch.zeros(size=(0,0))
    proc = torch.zeros(size=(0,0))
    out = torch.zeros(size=(0,0))
    lab = torch.zeros(size=(0,0))
size_cond, size_proc, size_meds, size_out, size_chart, size_lab, eth_vocab,gender_vocab,age_vocab,ins_vocab=vocab(task.replace(" ","_"),feat_cond,feat_proc,feat_out,feat_chart,feat_meds,False)
dyn,cond_df,demo=concat_data(data,task.replace(" ","_"),feat_cond,feat_proc,feat_out,feat_chart,feat_meds,feat_lab)
###########""
if feat_chart:
charts = dyn['CHART']
charts=charts.to_numpy()
charts = torch.tensor(charts, dtype=torch.long)
charts = charts.tolist()
if feat_meds:
meds = dyn['MEDS']
meds=meds.to_numpy()
meds = torch.tensor(meds, dtype=torch.long)
meds = meds.tolist()
if feat_proc:
proc = dyn['PROC']
proc=proc.to_numpy()
proc = torch.tensor(proc, dtype=torch.long)
proc = proc.tolist()
    if feat_out:
        # OUT has the same 2-D (time x feature) layout as the other modalities,
        # so no reshape is needed before converting to a list
        out = dyn['OUT']
        out = out.to_numpy()
        out = torch.tensor(out, dtype=torch.long)
        out = out.tolist()
if feat_lab:
lab = dyn['LAB']
lab=lab.to_numpy()
lab = torch.tensor(lab, dtype=torch.long)
lab = lab.tolist()
####################""
stat=cond_df
stat = stat.to_numpy()
stat = torch.tensor(stat)
if stat_df[0].nelement():
stat_df = torch.cat((stat_df,stat),0)
else:
stat_df = stat
    y = int(demo['label'].iloc[0])
demo["gender"].replace(gender_vocab, inplace=True)
demo["ethnicity"].replace(eth_vocab, inplace=True)
demo["insurance"].replace(ins_vocab, inplace=True)
demo["Age"].replace(age_vocab, inplace=True)
demo=demo[["gender","ethnicity","insurance","Age"]]
demo = demo.values
demo = torch.tensor(demo)
if demo_df[0].nelement():
demo_df = torch.cat((demo_df,demo),0)
else:
demo_df = demo
    # stat_df and demo_df are already tensors; cast and squeeze in place
    stat_df = stat_df.type(torch.LongTensor).squeeze()
    demo_df = demo_df.type(torch.LongTensor).squeeze()
    y_df = torch.tensor(y, dtype=torch.long)
return stat_df, demo_df, meds, charts, out, proc, lab, y_df
def getXY(dyn,stat,demo,concat_cols,concat):
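    """Flatten one admission into a single feature row: the full concatenated time series when `concat` is True, per-feature aggregates otherwise."""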
X_df=pd.DataFrame()
if concat:
dyna=dyn.copy()
dyna.columns=dyna.columns.droplevel(0)
dyna=dyna.to_numpy()
dyna=np.nan_to_num(dyna, copy=False)
dyna=dyna.reshape(1,-1)
dyn_df=pd.DataFrame(data=dyna,columns=concat_cols)
else:
dyn_df=pd.DataFrame()
for key in dyn.columns.levels[0]:
dyn_temp=dyn[key]
if ((key=="CHART") or (key=="MEDS")):
agg=dyn_temp.aggregate("mean")
agg=agg.reset_index()
else:
agg=dyn_temp.aggregate("max")
agg=agg.reset_index()
if dyn_df.empty:
dyn_df=agg
else:
dyn_df=pd.concat([dyn_df,agg],axis=0)
dyn_df=dyn_df.T
dyn_df.columns = dyn_df.iloc[0]
dyn_df=dyn_df.iloc[1:,:]
X_df=pd.concat([dyn_df,stat],axis=1)
X_df=pd.concat([X_df,demo],axis=1)
return X_df
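# With concat=True the whole time series is flattened into one row of per-bucket
# columns named '<code>_<t>'; with concat=False each feature is first reduced to a
# single aggregate (mean for CHART/MEDS, max otherwise), so both paths produce
# exactly one feature row per admission.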
def task_cohort(task, mimic_path, config_path):
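    """Run the MIMIC-IV-Data-Pipeline end to end for `task`: cohort extraction, feature selection and grouping, cleaning, and time-series generation."""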
sys.path.append('./preprocessing/day_intervals_preproc')
sys.path.append('./utils')
sys.path.append('./preprocessing/hosp_module_preproc')
sys.path.append('./model')
import day_intervals_cohort_v22
import day_intervals_cohort
import feature_selection_icu
import data_generation_icu_modify
import data_generation_modify
import feature_selection_hosp
    root_dir = os.path.dirname(os.path.abspath('UserInterface.ipynb'))  # resolves to the current working directory (the pipeline root)
config_path='./config/'+config_path
with open(config_path) as f:
config = yaml.safe_load(f)
version_path = mimic_path+'/'
print(version_path)
    version = mimic_path.split('/')[-1][0]  # major MIMIC-IV version: '1' or '2'
start = time.time()
#----------------------------------------------config----------------------------------------------------
label, tim, disease_label, predW = check_config(task,config_path)
icu_no_icu = config['icu_no_icu']
timeW = config['timeWindow']
include=int(timeW.split()[1])
bucket = config['timebucket']
radimp = config['radimp']
diag_flag = config['diagnosis']
proc_flag= config['proc']
med_flag = config['meds']
disease_filter = config['disease_filter']
groupingDiag = config['groupingDiag']
select_diag= config['select_diag']
select_med= config['select_med']
select_proc= config['select_proc']
if icu_no_icu=='ICU':
out_flag = config['output']
chart_flag = config['chart']
select_out= config['select_out']
select_chart= config['select_chart']
lab_flag = False
select_lab = False
else:
lab_flag = config['lab']
groupingMed = config['groupingMed']
groupingProc = config['groupingProc']
select_lab= config['select_lab']
out_flag = False
chart_flag = False
select_out= False
select_chart= False
# -------------------------------------------------------------------------------------------------------------
data_icu=icu_no_icu=="ICU"
data_mort=label=="Mortality"
data_admn=label=='Readmission'
data_los=label=='Length of Stay'
if (disease_filter=="Heart Failure"):
icd_code='I50'
elif (disease_filter=="CKD"):
icd_code='N18'
elif (disease_filter=="COPD"):
icd_code='J44'
elif (disease_filter=="CAD"):
icd_code='I25'
else:
icd_code='No Disease Filter'
#-----------------------------------------------EXTRACT MIMIC-----------------------------------------------------
if version == '2':
cohort_output = day_intervals_cohort_v22.extract_data(icu_no_icu,label,tim,icd_code, root_dir,version_path,disease_label)
elif version == '1':
cohort_output = day_intervals_cohort.extract_data(icu_no_icu,label,tim,icd_code, root_dir,version_path,disease_label)
#----------------------------------------------FEATURES-------------------------------------------------------
if data_icu :
feature_selection_icu.feature_icu(cohort_output, version_path,diag_flag,out_flag,chart_flag,proc_flag,med_flag)
else:
feature_selection_hosp.feature_nonicu(cohort_output, version_path,diag_flag,lab_flag,proc_flag,med_flag)
#----------------------------------------------GROUPING-------------------------------------------------------
if data_icu:
if diag_flag:
group_diag=groupingDiag
feature_selection_icu.preprocess_features_icu(cohort_output, diag_flag, group_diag,False,False,False,0,0)
else:
if diag_flag:
group_diag=groupingDiag
if med_flag:
group_med=groupingMed
if proc_flag:
group_proc=groupingProc
feature_selection_hosp.preprocess_features_hosp(cohort_output, diag_flag,proc_flag,med_flag,False,group_diag,group_med,group_proc,False,False,0,0)
#----------------------------------------------SUMMARY-------------------------------------------------------
if data_icu:
feature_selection_icu.generate_summary_icu(diag_flag,proc_flag,med_flag,out_flag,chart_flag)
else:
feature_selection_hosp.generate_summary_hosp(diag_flag,proc_flag,med_flag,lab_flag)
#----------------------------------------------FEATURE SELECTION---------------------------------------------
if data_icu:
feature_selection_icu.features_selection_icu(cohort_output, diag_flag,proc_flag,med_flag,out_flag, chart_flag,select_diag,select_med,select_proc,select_out,select_chart)
else:
feature_selection_hosp.features_selection_hosp(cohort_output, diag_flag,proc_flag,med_flag,lab_flag,select_diag,select_med,select_proc,select_lab)
#---------------------------------------CLEANING OF FEATURES-----------------------------------------------
thresh=0
if data_icu:
if chart_flag:
outlier_removal=config['outlier_removal']
clean_chart=outlier_removal!='No outlier detection'
impute_outlier_chart=outlier_removal=='Impute Outlier (default:98)'
thresh=config['outlier']
left_thresh=config['left_outlier']
feature_selection_icu.preprocess_features_icu(cohort_output, False, False,chart_flag,clean_chart,impute_outlier_chart,thresh,left_thresh)
else:
if lab_flag:
outlier_removal=config['outlier_removal']
clean_chart=outlier_removal!='No outlier detection'
impute_outlier_chart=outlier_removal=='Impute Outlier (default:98)'
thresh=config['outlier']
left_thresh=config['left_outlier']
feature_selection_hosp.preprocess_features_hosp(cohort_output, False,False, False,lab_flag,False,False,False,clean_chart,impute_outlier_chart,thresh,left_thresh)
    # ---------------------------------------Time-Series Representation--------------------------------------------
if radimp == 'forward fill and mean' :
impute='Mean'
elif radimp =='forward fill and median':
impute = 'Median'
else :
impute = False
if data_icu:
gen=data_generation_icu_modify.Generator(task,cohort_output,data_mort,data_admn,data_los,diag_flag,proc_flag,out_flag,chart_flag,med_flag,impute,include,bucket,predW)
else:
gen=data_generation_modify.Generator(cohort_output,data_mort,data_admn,data_los,diag_flag,lab_flag,proc_flag,med_flag,impute,include,bucket,predW)
end = time.time()
print("Time elapsed : ", round((end - start)/60,2),"mins")
print("[============TASK COHORT SUCCESSFULLY CREATED============]")
#############################################DATASET####################################################################
class Mimic4DatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for Mimic4Dataset."""
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
class Mimic4Dataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
def __init__(self, **kwargs):
self.mimic_path = kwargs.pop("mimic_path", None)
self.encoding = kwargs.pop("encoding",'raw')
self.config_path = kwargs.pop("config_path",None)
self.test_size = kwargs.pop("test_size",0.2)
self.val_size = kwargs.pop("val_size",0.1)
self.generate_cohort = kwargs.pop("generate_cohort",True)
if self.encoding == 'concat':
self.concat = True
else:
self.concat = False
super().__init__(**kwargs)
BUILDER_CONFIGS = [
Mimic4DatasetConfig(
name="Phenotype",
version=VERSION,
description="Dataset for mimic4 Phenotype task"
),
Mimic4DatasetConfig(
name="Readmission",
version=VERSION,
description="Dataset for mimic4 Readmission task"
),
Mimic4DatasetConfig(
name="Length of Stay",
version=VERSION,
description="Dataset for mimic4 Length of Stay task"
),
Mimic4DatasetConfig(
name="Mortality",
version=VERSION,
description="Dataset for mimic4 Mortality task"
),
]
DEFAULT_CONFIG_NAME = "Mortality"
def create_cohort(self):
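        """Clone the pipeline repository if needed, fetch the config and generation modules, build the task cohort, and split it into train/val/test pickles; returns the dict directory."""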
        if self.config_path is None:
if self.config.name == 'Phenotype' : self.config_path = _CONFIG_URLS['phenotype']
if self.config.name == 'Readmission' : self.config_path = _CONFIG_URLS['readmission']
if self.config.name == 'Length of Stay' : self.config_path = _CONFIG_URLS['los']
if self.config.name == 'Mortality' : self.config_path = _CONFIG_URLS['mortality']
version = self.mimic_path.split('/')[-1]
mimic_folder= self.mimic_path.split('/')[-2]
mimic_complete_path='/'+mimic_folder+'/'+version
current_directory = os.getcwd()
if os.path.exists(os.path.dirname(current_directory)+'/MIMIC-IV-Data-Pipeline-main'):
dir =os.path.dirname(current_directory)
os.chdir(dir)
else:
            #move to the parent directory of the mimic data
            dir = self.mimic_path.replace(mimic_complete_path,'')
            if dir=='':
                dir='./'
            elif dir[-1]!='/':
                dir=dir+'/'
            parent_dir = os.path.dirname(self.mimic_path)
            os.chdir(parent_dir)
        #####################clone the pipeline repo if it doesn't exist
        repo_url='https://github.com/healthylaife/MIMIC-IV-Data-Pipeline'
        path_bench = './MIMIC-IV-Data-Pipeline-main'
        if not os.path.exists(path_bench):
            subprocess.run(["git", "clone", repo_url, path_bench])
            os.makedirs(path_bench+'/mimic-iv')
            shutil.move(version,path_bench+'/mimic-iv')
        os.chdir(path_bench)
        self.mimic_path = './mimic-iv/'+version
####################Get configurations param
#download config file if not custom
if self.config_path[0:4] == 'http':
c = self.config_path.split('/')[-1]
file_path, head = urlretrieve(self.config_path,c)
else :
file_path = self.config_path
if not os.path.exists('./config'):
os.makedirs('config')
#save config file in config folder
self.conf='./config/'+file_path.split('/')[-1]
if not os.path.exists(self.conf):
shutil.move(file_path,'./config')
with open(self.conf) as f:
config = yaml.safe_load(f)
timeW = config['timeWindow']
self.timeW=int(timeW.split()[1])
self.bucket = config['timebucket']
self.data_icu = config['icu_no_icu']=='ICU'
if self.data_icu:
            self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out = config['diagnosis'], config['chart'], config['proc'], config['meds'], config['output']
            self.feat_lab = False
        else:
            self.feat_cond, self.feat_lab, self.feat_proc, self.feat_meds = config['diagnosis'], config['lab'], config['proc'], config['meds']
            self.feat_out = False
            self.feat_chart = False
#####################downloads modules from hub
if not os.path.exists('./model/data_generation_icu_modify.py'):
file_path, head = urlretrieve(_DATA_GEN, "data_generation_icu_modify.py")
shutil.move(file_path, './model')
if not os.path.exists('./model/data_generation_modify.py'):
file_path, head = urlretrieve(_DATA_GEN_HOSP, "data_generation_modify.py")
shutil.move(file_path, './model')
if not os.path.exists('./preprocessing/day_intervals_preproc/day_intervals_cohort_v22.py'):
file_path, head = urlretrieve(_DAY_INT, "day_intervals_cohort_v22.py")
shutil.move(file_path, './preprocessing/day_intervals_preproc')
data_dir = "./data/dict/"+self.config.name.replace(" ","_")+"/dataDic"
sys.path.append(path_bench)
        config_file = self.config_path.split('/')[-1]
        #####################create task cohort
        if self.generate_cohort:
            task_cohort(self.config.name.replace(" ","_"),self.mimic_path,config_file)
#####################Split data into train, test and val
with open(data_dir, 'rb') as fp:
dataDic = pickle.load(fp)
data = pd.DataFrame.from_dict(dataDic)
dict_dir = "./data/dict/"+self.config.name.replace(" ","_")
data=data.T
train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42)
if self.val_size > 0 :
train_data, val_data = train_test_split(train_data, test_size=self.val_size, random_state=42)
val_dic = val_data.to_dict('index')
val_path = dict_dir+'/val_data.pkl'
with open(val_path, 'wb') as f:
pickle.dump(val_dic, f)
train_dic = train_data.to_dict('index')
test_dic = test_data.to_dict('index')
train_path = dict_dir+'/train_data.pkl'
test_path = dict_dir+'/test_data.pkl'
with open(train_path, 'wb') as f:
pickle.dump(train_dic, f)
with open(test_path, 'wb') as f:
pickle.dump(test_dic, f)
return dict_dir
###########################################################RAW##################################################################
def _info_raw(self):
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
"gender": datasets.Value("string"),
"ethnicity": datasets.Value("string"),
"insurance": datasets.Value("string"),
"age": datasets.Value("int32"),
"COND": datasets.Sequence(datasets.Value("string")),
"MEDS": {
"signal":
{
"id": datasets.Sequence(datasets.Value("int32")),
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
}
,
"rate":
{
"id": datasets.Sequence(datasets.Value("int32")),
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
}
,
"amount":
{
"id": datasets.Sequence(datasets.Value("int32")),
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
}
},
"PROC": {
"id": datasets.Sequence(datasets.Value("int32")),
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
},
"CHART/LAB":
{
"signal" : {
"id": datasets.Sequence(datasets.Value("int32")),
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
},
"val" : {
"id": datasets.Sequence(datasets.Value("int32")),
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
},
},
"OUT": {
"id": datasets.Sequence(datasets.Value("int32")),
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
},
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _generate_examples_raw(self, filepath):
with open(filepath, 'rb') as fp:
dataDic = pickle.load(fp)
for hid, data in dataDic.items():
proc_features = data['Proc']
meds_features = data['Med']
out_features = data['Out']
cond_features = data['Cond']['fids']
eth= data['ethnicity']
age = data['age']
gender = data['gender']
label = data['label']
insurance=data['insurance']
items = list(proc_features.keys())
values =[proc_features[i] for i in items ]
procs = {"id" : items,
"value": values}
items_outs = list(out_features.keys())
values_outs =[out_features[i] for i in items_outs ]
outs = {"id" : items_outs,
"value": values_outs}
if self.data_icu:
chart_features = data['Chart']
else:
chart_features = data['Lab']
#chart signal
if ('signal' in chart_features):
items_chart_sig = list(chart_features['signal'].keys())
values_chart_sig =[chart_features['signal'][i] for i in items_chart_sig ]
chart_sig = {"id" : items_chart_sig,
"value": values_chart_sig}
else:
chart_sig = {"id" : [],
"value": []}
#chart val
if ('val' in chart_features):
items_chart_val = list(chart_features['val'].keys())
values_chart_val =[chart_features['val'][i] for i in items_chart_val ]
chart_val = {"id" : items_chart_val,
"value": values_chart_val}
else:
chart_val = {"id" : [],
"value": []}
charts = {"signal" : chart_sig,
"val" : chart_val}
#meds signal
if ('signal' in meds_features):
items_meds_sig = list(meds_features['signal'].keys())
values_meds_sig =[meds_features['signal'][i] for i in items_meds_sig ]
meds_sig = {"id" : items_meds_sig,
"value": values_meds_sig}
else:
meds_sig = {"id" : [],
"value": []}
#meds rate
if ('rate' in meds_features):
items_meds_rate = list(meds_features['rate'].keys())
values_meds_rate =[meds_features['rate'][i] for i in items_meds_rate ]
meds_rate = {"id" : items_meds_rate,
"value": values_meds_rate}
else:
meds_rate = {"id" : [],
"value": []}
#meds amount
if ('amount' in meds_features):
items_meds_amount = list(meds_features['amount'].keys())
values_meds_amount =[meds_features['amount'][i] for i in items_meds_amount ]
meds_amount = {"id" : items_meds_amount,
"value": values_meds_amount}
else:
meds_amount = {"id" : [],
"value": []}
meds = {"signal" : meds_sig,
"rate" : meds_rate,
"amount" : meds_amount}
yield int(hid), {
"label" : label,
"gender" : gender,
"ethnicity" : eth,
"insurance" : insurance,
"age" : age,
"COND" : cond_features,
"PROC" : procs,
"CHART/LAB" : charts,
"OUT" : outs,
"MEDS" : meds
}
###########################################################ENCODED##################################################################
def _info_encoded(self):
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
"features" : datasets.Sequence(datasets.Value("float32")),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _generate_examples_encoded(self, filepath):
path= './data/dict/'+self.config.name.replace(" ","_")+'/ethVocab'
with open(path, 'rb') as fp:
ethVocab = pickle.load(fp)
path= './data/dict/'+self.config.name.replace(" ","_")+'/insVocab'
with open(path, 'rb') as fp:
insVocab = pickle.load(fp)
genVocab = ['<PAD>', 'M', 'F']
gen_encoder = LabelEncoder()
eth_encoder = LabelEncoder()
ins_encoder = LabelEncoder()
gen_encoder.fit(genVocab)
eth_encoder.fit(ethVocab)
ins_encoder.fit(insVocab)
with open(filepath, 'rb') as fp:
dico = pickle.load(fp)
df = pd.DataFrame.from_dict(dico, orient='index')
task=self.config.name.replace(" ","_")
for i, data in df.iterrows():
concat_cols=[]
dyn_df,cond_df,demo=concat_data(data,task,self.feat_cond,self.feat_proc,self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab)
dyn=dyn_df.copy()
dyn.columns=dyn.columns.droplevel(0)
cols=dyn.columns
time=dyn.shape[0]
for t in range(time):
cols_t = [str(x) + "_"+str(t) for x in cols]
concat_cols.extend(cols_t)
demo['gender']=gen_encoder.transform(demo['gender'])
demo['ethnicity']=eth_encoder.transform(demo['ethnicity'])
demo['insurance']=ins_encoder.transform(demo['insurance'])
label = data['label']
demo=demo.drop(['label'],axis=1)
X= getXY(dyn_df,cond_df,demo,concat_cols,self.concat)
X=X.values.tolist()[0]
yield int(i), {
"label": label,
"features": X,
}
######################################################DEEP###############################################################
def _info_deep(self):
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
#"DEMO": datasets.Array2D(shape=(None, 4), dtype='int64') ,
"DEMO": datasets.Sequence(datasets.Value("int64")),
"COND" : datasets.Sequence(datasets.Value("int64")),
#"COND" : datasets.Array2D(shape=(None, self.size_cond), dtype='int64') ,
"MEDS" : datasets.Array2D(shape=(None, self.size_meds), dtype='int64') ,
"PROC" : datasets.Array2D(shape=(None, self.size_proc), dtype='int64') ,
"CHART/LAB" : datasets.Array2D(shape=(None, self.size_chart), dtype='int64') ,
#"CHART/LAB" : datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
"OUT" : datasets.Array2D(shape=(None, self.size_out), dtype='int64') ,
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _generate_examples_deep(self, filepath):
with open(filepath, 'rb') as fp:
dico = pickle.load(fp)
task=self.config.name.replace(" ","_")
for key, data in dico.items():
stat, demo, meds, chart, out, proc, lab, y = getXY_deep(data, task, self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_meds,self.feat_lab)
            # keep only stays whose time series cover the full time window
            valid = True
            if self.feat_proc and len(proc) < (self.timeW // self.bucket):
                valid = False
            if self.feat_out and len(out) < (self.timeW // self.bucket):
                valid = False
            if self.feat_chart and len(chart) < (self.timeW // self.bucket):
                valid = False
            if self.feat_meds and len(meds) < (self.timeW // self.bucket):
                valid = False
            if self.feat_lab and len(lab) < (self.timeW // self.bucket):
                valid = False
            if valid:
if self.data_icu:
yield int(key), {
'label': y,
'DEMO': demo,
'COND': stat,
'MEDS': meds,
'PROC': proc,
'CHART/LAB': chart,
'OUT': out,
}
else:
yield int(key), {
'label': y,
'DEMO': demo,
'COND': stat,
'MEDS': meds,
'PROC': proc,
'CHART/LAB': lab,
'OUT': out,
}
#############################################################################################################################
def _info(self):
self.path = self.create_cohort()
self.size_cond, self.size_proc, self.size_meds, self.size_out, self.size_chart, self.size_lab, eth_vocab,gender_vocab,age_vocab,ins_vocab=vocab(self.config.name.replace(" ","_"),self.feat_cond,self.feat_proc,self.feat_out,self.feat_chart,self.feat_meds,self.feat_lab)
if self.encoding == 'concat' :
return self._info_encoded()
elif self.encoding == 'aggreg' :
return self._info_encoded()
elif self.encoding == 'tensor' :
return self._info_deep()
else:
return self._info_raw()
def _split_generators(self, dl_manager):
csv_dir = "./data/dict/"+self.config.name.replace(" ","_")
if self.val_size > 0 :
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.pkl'}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}),
]
else :
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}),
]
def _generate_examples(self, filepath):
if self.encoding == 'concat' :
yield from self._generate_examples_encoded(filepath)
elif self.encoding == 'aggreg' :
yield from self._generate_examples_encoded(filepath)
elif self.encoding == 'tensor' :
yield from self._generate_examples_deep(filepath)
else :
yield from self._generate_examples_raw(filepath)