import os
import pandas as pd
import datasets
import sys
import pickle
import subprocess
import shutil
from urllib.request import urlretrieve
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import yaml
import numpy as np
from .dataset_utils import vocab, concat_data, generate_deep, generate_ml
from .task_cohort import create_cohort
_DESCRIPTION = """\
Dataset builder for MIMIC-IV data, configured by default for the Mortality task.
Available tasks: Mortality, Length of Stay, Readmission, Phenotype.
The data is extracted from the MIMIC-IV database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main'
mimic_path should have the form: "path/to/mimic4data/from/username/mimiciv/2.2"
If you choose a Custom task, provide a configuration file for the time series.
Currently works with MIMIC-IV versions 1 and 2.
"""
_BASE_URL = "https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main"
_HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset"
_CITATION = "https://proceedings.mlr.press/v193/gupta22a.html"
_GIT_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline"
_ICD_CODE = f"{_BASE_URL}/icd10.txt"
_DATA_GEN = f"{_BASE_URL}/data_generation_icu_modify.py"
_DATA_GEN_HOSP = f"{_BASE_URL}/data_generation_modify.py"
_DAY_INT = f"{_BASE_URL}/day_intervals_cohort_v22.py"
_CONFIG_URLS = {'los' : f"{_BASE_URL}/config/los.config",
'mortality' : f"{_BASE_URL}/config/mortality.config",
'phenotype' : f"{_BASE_URL}/config/phenotype.config",
'readmission' : f"{_BASE_URL}/config/readmission.config"
}
class Mimic4DatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for Mimic4Dataset."""
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
class Mimic4Dataset(datasets.GeneratorBasedBuilder):
"""Create Mimic4Dataset dataset from Mimic-IV data stored in user machine."""
VERSION = datasets.Version("1.0.0")
    def __init__(self, **kwargs):
        """Pop builder-specific kwargs before handing the rest to the datasets base class.

        mimic_path:      path to the local MIMIC-IV data, e.g. ".../mimiciv/2.2"
        encoding:        'concat', 'aggreg', 'tensor' or 'text'; any other value yields raw examples
        config_path:     custom task configuration file (defaults to the hub config for the task)
        test_size:       fraction of the cohort held out for the test split
        val_size:        fraction of the train split held out for validation
        generate_cohort: whether to (re)generate the task cohort
        """
        self.mimic_path = kwargs.pop("mimic_path", None)
        self.encoding = kwargs.pop("encoding", 'concat')
        self.config_path = kwargs.pop("config_path", None)
        self.test_size = kwargs.pop("test_size", 0.2)
        self.val_size = kwargs.pop("val_size", 0.1)
        self.generate_cohort = kwargs.pop("generate_cohort", True)
        self.concat = (self.encoding == 'concat')
        super().__init__(**kwargs)
BUILDER_CONFIGS = [
Mimic4DatasetConfig(
name="Phenotype",
version=VERSION,
description="Dataset for mimic4 Phenotype task"
),
Mimic4DatasetConfig(
name="Readmission",
version=VERSION,
description="Dataset for mimic4 Readmission task"
),
Mimic4DatasetConfig(
name="Length of Stay",
version=VERSION,
description="Dataset for mimic4 Length of Stay task"
),
Mimic4DatasetConfig(
name="Mortality",
version=VERSION,
description="Dataset for mimic4 Mortality task"
),
]
DEFAULT_CONFIG_NAME = "Mortality"
    def init_cohort(self):
        if self.config_path is None:
            default_configs = {'Phenotype': 'phenotype', 'Readmission': 'readmission',
                               'Length of Stay': 'los', 'Mortality': 'mortality'}
            self.config_path = _CONFIG_URLS[default_configs[self.config.name]]
        version = self.mimic_path.split('/')[-1]
        mimic_folder = self.mimic_path.split('/')[-2]
        mimic_complete_path = '/' + mimic_folder + '/' + version
        current_directory = os.getcwd()
        if os.path.exists(os.path.dirname(current_directory) + '/MIMIC-IV-Data-Pipeline-main'):
            os.chdir(os.path.dirname(current_directory))
        else:
            # move to the parent directory of the mimic data
            data_root = self.mimic_path.replace(mimic_complete_path, '')
            print('dir : ', data_root)
            # check for an empty path before indexing its last character
            if data_root == '':
                data_root = "./"
            elif data_root[-1] != '/':
                data_root = data_root + '/'
            parent_dir = os.path.dirname(self.mimic_path)
            os.chdir(parent_dir)
        ##################### clone the git repo if it doesn't exist
        path_bench = './MIMIC-IV-Data-Pipeline-main'
        if not os.path.exists(path_bench):
            subprocess.run(["git", "clone", _GIT_URL, path_bench])
            os.makedirs(path_bench + '/mimic-iv')
            shutil.move(version, path_bench + '/mimic-iv')
        os.chdir(path_bench)
        self.mimic_path = './mimic-iv/' + version
        #################### get configuration params
        # download the config file unless a custom local one was supplied
        if self.config_path.startswith('http'):
            c = self.config_path.split('/')[-1]
            file_path, _ = urlretrieve(self.config_path, c)
        else:
            file_path = self.config_path
        if not os.path.exists('./config'):
            os.makedirs('config')
        # keep the config file in the config folder
        self.conf = './config/' + file_path.split('/')[-1]
        if not os.path.exists(self.conf):
            shutil.move(file_path, './config')
with open(self.conf) as f:
config = yaml.safe_load(f)
timeW = config['timeWindow']
self.timeW=int(timeW.split()[1])
self.bucket = config['timebucket']
self.predW = config['predW']
self.data_icu = config['icu_no_icu']=='ICU'
if self.data_icu:
self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out, self.feat_lab = config['diagnosis'], config['chart'], config['proc'], config['meds'], config['output'], False
else:
self.feat_cond, self.feat_lab, self.feat_proc, self.feat_meds, self.feat_chart, self.feat_out = config['diagnosis'], config['lab'], config['proc'], config['meds'], False, False
        ##################### download modified pipeline modules from the hub
        if not os.path.exists('./icd10.txt'):
            file_path, _ = urlretrieve(_ICD_CODE, "icd10.txt")
            shutil.move(file_path, './')
        if not os.path.exists('./model/data_generation_icu_modify.py'):
            file_path, _ = urlretrieve(_DATA_GEN, "data_generation_icu_modify.py")
            shutil.move(file_path, './model')
        if not os.path.exists('./model/data_generation_modify.py'):
            file_path, _ = urlretrieve(_DATA_GEN_HOSP, "data_generation_modify.py")
            shutil.move(file_path, './model')
        if not os.path.exists('./preprocessing/day_intervals_preproc/day_intervals_cohort_v22.py'):
            file_path, _ = urlretrieve(_DAY_INT, "day_intervals_cohort_v22.py")
            shutil.move(file_path, './preprocessing/day_intervals_preproc')
data_dir = "./data/dict/"+self.config.name.replace(" ","_")+"/dataDic"
sys.path.append(path_bench)
config = self.config_path.split('/')[-1]
#####################create task cohort
if self.generate_cohort:
create_cohort(self.config.name.replace(" ","_"),self.mimic_path,config)
#####################Split data into train, test and val
with open(data_dir, 'rb') as fp:
dataDic = pickle.load(fp)
data = pd.DataFrame.from_dict(dataDic)
dict_dir = "./data/dict/"+self.config.name.replace(" ","_")
data=data.T
train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42)
if self.val_size > 0 :
train_data, val_data = train_test_split(train_data, test_size=self.val_size, random_state=42)
val_dic = val_data.to_dict('index')
val_path = dict_dir+'/val_data.pkl'
with open(val_path, 'wb') as f:
pickle.dump(val_dic, f)
train_dic = train_data.to_dict('index')
test_dic = test_data.to_dict('index')
train_path = dict_dir+'/train_data.pkl'
test_path = dict_dir+'/test_data.pkl'
with open(train_path, 'wb') as f:
pickle.dump(train_dic, f)
with open(test_path, 'wb') as f:
pickle.dump(test_dic, f)
return dict_dir
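    # Worked example of the split above (illustrative, with the default arguments):
    # test_size=0.2 first carves out 20% of the cohort for testing; val_size=0.1 is
    # then applied to the remaining train split, so the final proportions are
    # roughly 72% train / 8% validation / 20% test (0.8 * 0.1 = 0.08).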
    def verif_dim_tensor(self, proc, out, chart, meds, lab):
        """Check that every enabled feature tensor covers the expected number of time intervals."""
        interv = (self.timeW // self.bucket) + 1
        if self.feat_proc and len(proc) != interv:
            return False
        if self.feat_out and len(out) != interv:
            return False
        if self.feat_chart and len(chart) != interv:
            return False
        if self.feat_meds and len(meds) != interv:
            return False
        if self.feat_lab and len(lab) != interv:
            return False
        return True
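    # Example of the interval arithmetic above (illustrative numbers, not from a
    # shipped config): a 72-hour time window with a 2-hour bucket gives
    # interv = (72 // 2) + 1 = 37 expected rows per enabled feature tensor.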
    def open_dict(self, cond, proc, out, chart, lab, med):
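        """Load the pickled vocabulary dict for each enabled feature group.

        Note: when chart events are disabled but labs are enabled, the lab
        vocabulary is returned in the chart slot, matching how CHART/LAB is
        emitted by the generators below.
        """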
if cond:
with open("./data/dict/"+self.config.name.replace(" ","_")+"/condVocab", 'rb') as fp:
condDict = pickle.load(fp)
else :
condDict=None
if proc:
with open("./data/dict/"+self.config.name.replace(" ","_")+"/procVocab", 'rb') as fp:
procDict = pickle.load(fp)
else :
procDict=None
if out:
with open("./data/dict/"+self.config.name.replace(" ","_")+"/outVocab", 'rb') as fp:
outDict = pickle.load(fp)
else :
outDict=None
if chart:
with open("./data/dict/"+self.config.name.replace(" ","_")+"/chartVocab", 'rb') as fp:
chartDict = pickle.load(fp)
elif lab:
with open("./data/dict/"+self.config.name.replace(" ","_")+"/labsVocab", 'rb') as fp:
chartDict = pickle.load(fp)
else :
chartDict=None
if med:
with open("./data/dict/"+self.config.name.replace(" ","_")+"/medVocab", 'rb') as fp:
medDict = pickle.load(fp)
else :
medDict=None
return condDict, procDict, outDict, chartDict, medDict
###########################################################RAW##################################################################
    def _info_raw(self):
        features = datasets.Features(
            {
                "label": datasets.ClassLabel(num_classes=2, names=["0", "1"]),
                "gender": datasets.Value("string"),
                "ethnicity": datasets.Value("string"),
                "insurance": datasets.Value("string"),
                "age": datasets.Value("int32"),
                "COND": datasets.Sequence(datasets.Value("string")),
                "MEDS": {
                    "signal": {
                        "id": datasets.Sequence(datasets.Value("int32")),
                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                    },
                    "rate": {
                        "id": datasets.Sequence(datasets.Value("int32")),
                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                    },
                    "amount": {
                        "id": datasets.Sequence(datasets.Value("int32")),
                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                    }
                },
                "PROC": {
                    "id": datasets.Sequence(datasets.Value("int32")),
                    "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                },
                "CHART/LAB": {
                    "signal": {
                        "id": datasets.Sequence(datasets.Value("int32")),
                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                    },
                    "val": {
                        "id": datasets.Sequence(datasets.Value("int32")),
                        "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                    },
                },
                "OUT": {
                    "id": datasets.Sequence(datasets.Value("int32")),
                    "value": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))
                },
            }
        )
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)
    def _generate_examples_raw(self, filepath):
        def id_value(feat):
            """Flatten a {feature_id: values} dict into parallel id/value lists."""
            ids = list(feat.keys())
            return {"id": ids, "value": [feat[i] for i in ids]}

        empty = {"id": [], "value": []}
        with open(filepath, 'rb') as fp:
            dataDic = pickle.load(fp)
        for hid, data in dataDic.items():
            proc_features = data['Proc']
            meds_features = data['Med']
            out_features = data['Out']
            cond_features = data['Cond']['fids']
            eth = data['ethnicity']
            age = data['age']
            gender = data['gender']
            label = data['label']
            insurance = data['insurance']
            procs = id_value(proc_features)
            outs = id_value(out_features)
            # chart events for ICU data, lab events otherwise
            if self.data_icu:
                chart_features = data['Chart']
            else:
                chart_features = data['Lab']
            charts = {
                "signal": id_value(chart_features['signal']) if 'signal' in chart_features else empty,
                "val": id_value(chart_features['val']) if 'val' in chart_features else empty,
            }
            meds = {
                "signal": id_value(meds_features['signal']) if 'signal' in meds_features else empty,
                "rate": id_value(meds_features['rate']) if 'rate' in meds_features else empty,
                "amount": id_value(meds_features['amount']) if 'amount' in meds_features else empty,
            }
            yield int(hid), {
                "label": label,
                "gender": gender,
                "ethnicity": eth,
                "insurance": insurance,
                "age": age,
                "COND": cond_features,
                "PROC": procs,
                "CHART/LAB": charts,
                "OUT": outs,
                "MEDS": meds,
            }
###########################################################ENCODED##################################################################
def _info_encoded(self):
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
"features" : datasets.Sequence(datasets.Value("float32")),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)
    def _generate_examples_encoded(self, filepath):
        # fit label encoders on the demographic vocabularies saved for this task
        path = './data/dict/' + self.config.name.replace(" ", "_") + '/ethVocab'
        with open(path, 'rb') as fp:
            ethVocab = pickle.load(fp)
        path = './data/dict/' + self.config.name.replace(" ", "_") + '/insVocab'
        with open(path, 'rb') as fp:
            insVocab = pickle.load(fp)
        genVocab = ['<PAD>', 'M', 'F']
        gen_encoder = LabelEncoder()
        eth_encoder = LabelEncoder()
        ins_encoder = LabelEncoder()
        gen_encoder.fit(genVocab)
        eth_encoder.fit(ethVocab)
        ins_encoder.fit(insVocab)
        with open(filepath, 'rb') as fp:
            dico = pickle.load(fp)
        df = pd.DataFrame.from_dict(dico, orient='index')
        for i, data in df.iterrows():
            concat_cols = []
            dyn_df, cond_df, demo = concat_data(data, self.config.name.replace(" ", "_"),
                                                self.feat_cond, self.feat_proc, self.feat_out,
                                                self.feat_chart, self.feat_meds, self.feat_lab,
                                                self.outDict, self.chartDict, self.condDict,
                                                self.procDict, self.medDict)
            dyn = dyn_df.copy()
            dyn.columns = dyn.columns.droplevel(0)
            cols = dyn.columns
            time = dyn.shape[0]
            # suffix each dynamic column with its time step: col_0, col_1, ...
            for t in range(time):
                cols_t = [str(x) + "_" + str(t) for x in cols]
                concat_cols.extend(cols_t)
            demo['gender'] = gen_encoder.transform(demo['gender'])
            demo['ethnicity'] = eth_encoder.transform(demo['ethnicity'])
            demo['insurance'] = ins_encoder.transform(demo['insurance'])
            label = data['label']
            demo = demo.drop(['label'], axis=1)
            X = generate_ml(dyn_df, cond_df, demo, concat_cols, self.concat)
            X = X.values.tolist()[0]
            interv = (self.timeW // self.bucket) + 1
            # expected vector length: each time-varying group contributes size * interv
            # when concatenated, size once when aggregated; +4 for demographics
            size_concat = self.size_cond + (self.size_proc + self.size_meds + self.size_out
                                            + self.size_chart + self.size_lab) * interv + 4
            size_aggreg = (self.size_cond + self.size_proc + self.size_meds + self.size_out
                           + self.size_chart + self.size_lab + 4)
            # drop rows whose encoded vector doesn't match the expected dimensionality
            if (self.concat and len(X) == size_concat) or (not self.concat and len(X) == size_aggreg):
                yield int(i), {
                    "label": label,
                    "features": X,
                }
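    # Worked example of the dimensionality check above (made-up vocabulary sizes):
    # with size_cond=10, size_proc=5, size_meds=8, size_out=3, size_chart=20,
    # size_lab=0 and interv=37, 'concat' expects 10 + (5+8+3+20+0)*37 + 4 = 1346
    # features, while 'aggreg' expects 10 + 5 + 8 + 3 + 20 + 0 + 4 = 50.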
######################################################DEEP###############################################################
def _info_deep(self):
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
"DEMO": datasets.Sequence(datasets.Value("int64")),
"COND" : datasets.Sequence(datasets.Value("int64")),
"MEDS" : datasets.Array2D(shape=(None, self.size_meds), dtype='int64') ,
"PROC" : datasets.Array2D(shape=(None, self.size_proc), dtype='int64') ,
"CHART/LAB" : datasets.Array2D(shape=(None, self.size_chart), dtype='int64') ,
"OUT" : datasets.Array2D(shape=(None, self.size_out), dtype='int64') ,
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)
    def _generate_examples_deep(self, filepath):
        with open(filepath, 'rb') as fp:
            dico = pickle.load(fp)
        for key, data in dico.items():
            stat, demo, meds, chart, out, proc, lab, y = generate_deep(
                data, self.config.name.replace(" ", "_"), self.feat_cond, self.feat_proc,
                self.feat_out, self.feat_chart, self.feat_meds, self.feat_lab,
                self.outDict, self.chartDict, self.condDict, self.procDict, self.medDict)
            if self.verif_dim_tensor(proc, out, chart, meds, lab):
                yield int(key), {
                    'label': y,
                    'DEMO': demo,
                    'COND': stat,
                    'MEDS': meds,
                    'PROC': proc,
                    # chart events for ICU cohorts, lab events otherwise
                    'CHART/LAB': chart if self.data_icu else lab,
                    'OUT': out,
                }
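    # Shape sketch for the tensor encoding (derived from _info_deep above, with
    # illustrative dimensions): each yielded MEDS entry is a (time, size_meds)
    # int64 array, PROC is (time, size_proc), CHART/LAB is (time, size_chart),
    # OUT is (time, size_out), and DEMO/COND are flat integer sequences.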
######################################################text##############################################################
def _info_text(self):
features = datasets.Features(
{
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]),
"text" : datasets.Value(dtype='string', id=None),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
citation=_CITATION,
)
    def _generate_examples_text(self, filepath):
        icd = pd.read_csv('icd10.txt', names=['code', 'description'], sep='\t')
        items = pd.read_csv(self.mimic_path + '/icu/d_items.csv.gz', compression='gzip', header=0)
        with open(filepath, 'rb') as fp:
            dico = pickle.load(fp)
        for key, data in dico.items():
            # diagnosis
            cond_text = ''
            if self.feat_cond:
                conds = data['Cond']['fids']
                descriptions = []
                for code in conds:
                    desc = icd[icd['code'] == code]
                    if not desc.empty:
                        descriptions.append(desc['description'].to_string(index=False))
                cond_text = 'The patient is diagnosed with {}.'.format(';'.join(descriptions))
            # chart
            chart_text = ''
            if self.feat_chart:
                chart = data['Chart']
                if chart:
                    charts = chart['val']
                    feat = charts.keys()
                    chart_val = [charts[k] for k in feat]
                    chart_mean = [round(np.mean(c), 3) for c in chart_val]
                    feat_text = [(items[items['itemid'] == f]['label']).to_string(index=False) for f in feat]
                    chart_lines = ['{} for {}'.format(mean_val, feat_label)
                                   for mean_val, feat_label in zip(chart_mean, feat_text)]
                    chart_text = 'The chart events measured are: {}.'.format(';'.join(chart_lines))
            yield int(key), {
                'label': data['label'],
                'text': cond_text + chart_text
            }
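    # Illustrative example of a yielded text record (made-up values):
    #   {'label': 1, 'text': 'The patient is diagnosed with Essential (primary)
    #    hypertension. The chart events measured are: 87.5 for Heart Rate.'}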
#############################################################################################################################
    def _info(self):
        self.path = self.init_cohort()
        (self.size_cond, self.size_proc, self.size_meds, self.size_out, self.size_chart,
         self.size_lab, eth_vocab, gender_vocab, age_vocab, ins_vocab) = vocab(
            self.config.name.replace(" ", "_"), self.feat_cond, self.feat_proc,
            self.feat_out, self.feat_chart, self.feat_meds, self.feat_lab)
        self.outDict, self.chartDict, self.condDict, self.procDict, self.medDict = self.open_dict(
            self.feat_cond, self.feat_proc, self.feat_out, self.feat_chart, self.feat_lab, self.feat_meds)
        if self.encoding in ('concat', 'aggreg'):
            return self._info_encoded()
        elif self.encoding == 'tensor':
            return self._info_deep()
        elif self.encoding == 'text':
            return self._info_text()
        else:
            return self._info_raw()
def _split_generators(self, dl_manager):
data_dir = "./data/dict/"+self.config.name.replace(" ","_")
if self.val_size > 0 :
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/train_data.pkl'}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/val_data.pkl'}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/test_data.pkl'}),
]
else :
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/train_data.pkl'}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/test_data.pkl'}),
]
    def _generate_examples(self, filepath):
        if self.encoding in ('concat', 'aggreg'):
            yield from self._generate_examples_encoded(filepath)
        elif self.encoding == 'tensor':
            yield from self._generate_examples_deep(filepath)
        elif self.encoding == 'text':
            yield from self._generate_examples_text(filepath)
        else:
            yield from self._generate_examples_raw(filepath)
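
# Minimal smoke-test sketch (an assumption, not part of the original script): the
# guard keeps it inert when the `datasets` library imports this file as a loading
# script; the path below is a placeholder for a local MIMIC-IV copy.
if __name__ == "__main__":
    from datasets import load_dataset
    ds = load_dataset(__file__, "Mortality",
                      mimic_path="/path/to/mimiciv/2.2", encoding="tensor")
    print(ds)                       # expect train/validation/test splits
    print(ds["train"][0].keys())    # label, DEMO, COND, MEDS, PROC, CHART/LAB, OUT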