|
import csv |
|
import json |
|
import os |
|
import pandas as pd |
|
import datasets |
|
import sys |
|
import pickle |
|
import subprocess |
|
import shutil |
|
from urllib.request import urlretrieve |
|
from sklearn.model_selection import train_test_split |
|
from sklearn.preprocessing import LabelEncoder |
|
import numpy as np |
|
from tqdm import tqdm |
|
import yaml |
|
|
|
|
|
_DESCRIPTION = """\ |
|
Dataset for mimic4 data, by default for the Mortality task. |
|
Available tasks are: Mortality, Length of Stay, Readmission, Phenotype, Mortality Custom, Length of Stay Custom, Readmission Custom, Phenotype Custom. |
|
The data is extracted from the mimic4 database using this pipeline: 'https://github.com/healthylaife/MIMIC-IV-Data-Pipeline/tree/main' |
|
mimic path should have this form : "path/to/mimic4data/from/username/mimiciv/2.2" |
|
If you choose a Custom task provide a configuration file for the Time series. |
|
""" |
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/thbndi/Mimic4Dataset" |
|
_CITATION = "https://proceedings.mlr.press/v193/gupta22a.html" |
|
_URL = "https://github.com/healthylaife/MIMIC-IV-Data-Pipeline" |
|
_DATA_GEN = 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/data_generation_icu_modify.py' |
|
_DAY_INT= 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/day_intervals_cohort_v22.py' |
|
_COHORT = 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/cohort.py' |
|
_CONFIG_URLS = {'los' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/los.config', |
|
'mortality' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/mortality.config', |
|
'phenotype' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/phenotype.config', |
|
'readmission' : 'https://huggingface.co/datasets/thbndi/Mimic4Dataset/resolve/main/config/readmission.config' |
|
} |
|
|
|
|
|
|
|
def onehot(data,task,feat_cond=False,feat_proc=False,feat_out=False,feat_chart=False,feat_meds=False): |
|
meds=data['Med'] |
|
proc = data['Proc'] |
|
out = data['Out'] |
|
chart = data['Chart'] |
|
cond= data['Cond']['fids'] |
|
|
|
cond_df=pd.DataFrame() |
|
proc_df=pd.DataFrame() |
|
out_df=pd.DataFrame() |
|
chart_df=pd.DataFrame() |
|
meds_df=pd.DataFrame() |
|
|
|
|
|
demo=pd.DataFrame(columns=['Age','gender','ethnicity','label','insurance']) |
|
new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'], 'label': data['label'], 'insurance': data['insurance']} |
|
demo = demo.append(new_row, ignore_index=True) |
|
|
|
|
|
if (feat_cond): |
|
|
|
with open("./data/dict/"+task+"/condVocab", 'rb') as fp: |
|
conDict = pickle.load(fp) |
|
conds=pd.DataFrame(conDict,columns=['COND']) |
|
features=pd.DataFrame(np.zeros([1,len(conds)]),columns=conds['COND']) |
|
|
|
|
|
if(cond ==[]): |
|
cond_df=pd.DataFrame(np.zeros([1,len(features)]),columns=features['COND']) |
|
cond_df=cond_df.fillna(0) |
|
else: |
|
cond_df=pd.DataFrame(cond,columns=['COND']) |
|
cond_df['val']=1 |
|
cond_df=(cond_df.drop_duplicates()).pivot(columns='COND',values='val').reset_index(drop=True) |
|
cond_df=cond_df.fillna(0) |
|
oneh = cond_df.sum().to_frame().T |
|
combined_df = pd.concat([features,oneh],ignore_index=True).fillna(0) |
|
combined_oneh=combined_df.sum().to_frame().T |
|
cond_df=combined_oneh |
|
|
|
|
|
if (feat_proc): |
|
with open("./data/dict/"+task+"/procVocab", 'rb') as fp: |
|
procDic = pickle.load(fp) |
|
|
|
if proc : |
|
feat=proc.keys() |
|
proc_val=[proc[key] for key in feat] |
|
procedures=pd.DataFrame(procDic,columns=['PROC']) |
|
features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC']) |
|
features.columns=pd.MultiIndex.from_product([["PROC"], features.columns]) |
|
procs=pd.DataFrame(columns=feat) |
|
for p,v in zip(feat,proc_val): |
|
procs[p]=v |
|
procs.columns=pd.MultiIndex.from_product([["PROC"], procs.columns]) |
|
proc_df = pd.concat([features,procs],ignore_index=True).fillna(0) |
|
else: |
|
procedures=pd.DataFrame(procDic,columns=['PROC']) |
|
features=pd.DataFrame(np.zeros([1,len(procedures)]),columns=procedures['PROC']) |
|
features.columns=pd.MultiIndex.from_product([["PROC"], features.columns]) |
|
proc_df=features.fillna(0) |
|
|
|
|
|
if (feat_out): |
|
with open("./data/dict/"+task+"/outVocab", 'rb') as fp: |
|
outDic = pickle.load(fp) |
|
|
|
if out : |
|
feat=out.keys() |
|
out_val=[out[key] for key in feat] |
|
outputs=pd.DataFrame(outDic,columns=['OUT']) |
|
features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT']) |
|
features.columns=pd.MultiIndex.from_product([["OUT"], features.columns]) |
|
outs=pd.DataFrame(columns=feat) |
|
for o,v in zip(feat,out_val): |
|
outs[o]=v |
|
outs.columns=pd.MultiIndex.from_product([["OUT"], outs.columns]) |
|
out_df = pd.concat([features,outs],ignore_index=True).fillna(0) |
|
else: |
|
outputs=pd.DataFrame(outDic,columns=['OUT']) |
|
features=pd.DataFrame(np.zeros([1,len(outputs)]),columns=outputs['OUT']) |
|
features.columns=pd.MultiIndex.from_product([["OUT"], features.columns]) |
|
out_df=features.fillna(0) |
|
|
|
|
|
if (feat_chart): |
|
with open("./data/dict/"+task+"/chartVocab", 'rb') as fp: |
|
chartDic = pickle.load(fp) |
|
|
|
if chart: |
|
charts=chart['val'] |
|
feat=charts.keys() |
|
chart_val=[charts[key] for key in feat] |
|
charts=pd.DataFrame(chartDic,columns=['CHART']) |
|
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART']) |
|
features.columns=pd.MultiIndex.from_product([["CHART"], features.columns]) |
|
|
|
chart=pd.DataFrame(columns=feat) |
|
for c,v in zip(feat,chart_val): |
|
chart[c]=v |
|
chart.columns=pd.MultiIndex.from_product([["CHART"], chart.columns]) |
|
chart_df = pd.concat([features,chart],ignore_index=True).fillna(0) |
|
else: |
|
charts=pd.DataFrame(chartDic,columns=['CHART']) |
|
features=pd.DataFrame(np.zeros([1,len(charts)]),columns=charts['CHART']) |
|
features.columns=pd.MultiIndex.from_product([["CHART"], features.columns]) |
|
chart_df=features.fillna(0) |
|
|
|
|
|
if (feat_meds): |
|
with open("./data/dict/"+task+"/medVocab", 'rb') as fp: |
|
medDic = pickle.load(fp) |
|
|
|
if meds: |
|
feat=meds['signal'].keys() |
|
med_val=[meds['amount'][key] for key in feat] |
|
meds=pd.DataFrame(medDic,columns=['MEDS']) |
|
features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS']) |
|
features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns]) |
|
|
|
med=pd.DataFrame(columns=feat) |
|
for m,v in zip(feat,med_val): |
|
med[m]=v |
|
med.columns=pd.MultiIndex.from_product([["MEDS"], med.columns]) |
|
meds_df = pd.concat([features,med],ignore_index=True).fillna(0) |
|
else: |
|
meds=pd.DataFrame(medDic,columns=['MEDS']) |
|
features=pd.DataFrame(np.zeros([1,len(meds)]),columns=meds['MEDS']) |
|
features.columns=pd.MultiIndex.from_product([["MEDS"], features.columns]) |
|
meds_df=features.fillna(0) |
|
|
|
dyn_df = pd.concat([meds_df,proc_df,out_df,chart_df], axis=1) |
|
return dyn_df,cond_df,demo |
|
|
|
def getXY(dyn,stat,demo,concat_cols,concat): |
|
X_df=pd.DataFrame() |
|
if concat: |
|
dyna=dyn.copy() |
|
dyna.columns=dyna.columns.droplevel(0) |
|
dyna=dyna.to_numpy() |
|
dyna=dyna.reshape(1,-1) |
|
dyn_df=pd.DataFrame(data=dyna,columns=concat_cols) |
|
else: |
|
dyn_df=pd.DataFrame() |
|
for key in dyn.columns.levels[0]: |
|
dyn_temp=dyn[key] |
|
if ((key=="CHART") or (key=="MEDS")): |
|
agg=dyn_temp.aggregate("mean") |
|
agg=agg.reset_index() |
|
else: |
|
agg=dyn_temp.aggregate("max") |
|
agg=agg.reset_index() |
|
|
|
if dyn_df.empty: |
|
dyn_df=agg |
|
else: |
|
dyn_df=pd.concat([dyn_df,agg],axis=0) |
|
dyn_df=dyn_df.T |
|
dyn_df.columns = dyn_df.iloc[0] |
|
dyn_df=dyn_df.iloc[1:,:] |
|
|
|
X_df=pd.concat([dyn_df,stat],axis=1) |
|
X_df=pd.concat([X_df,demo],axis=1) |
|
return X_df |
|
|
|
def encoding(X_data): |
|
gen_encoder = LabelEncoder() |
|
eth_encoder = LabelEncoder() |
|
ins_encoder = LabelEncoder() |
|
gen_encoder.fit(X_data['gender']) |
|
eth_encoder.fit(X_data['ethnicity']) |
|
ins_encoder.fit(X_data['insurance']) |
|
X_data['gender']=gen_encoder.transform(X_data['gender']) |
|
X_data['ethnicity']=eth_encoder.transform(X_data['ethnicity']) |
|
X_data['insurance']=ins_encoder.transform(X_data['insurance']) |
|
return X_data |
|
|
|
def generate_split(path,task,concat,feat_cond=True,feat_chart=True,feat_proc=True, feat_meds=True, feat_out=False): |
|
with open(path, 'rb') as fp: |
|
dico = pickle.load(fp) |
|
df = pd.DataFrame.from_dict(dico, orient='index') |
|
X_df=pd.DataFrame() |
|
|
|
taskf=task.replace(" ","_") |
|
for _, data in tqdm(df.iterrows(),desc='Encoding Data for '+task+' task'): |
|
concat_cols=[] |
|
sample=data |
|
dyn_df,cond_df,demo=onehot(sample,taskf,feat_cond,feat_chart,feat_proc, feat_meds, feat_out) |
|
dyn=dyn_df.copy() |
|
dyn.columns=dyn.columns.droplevel(0) |
|
cols=dyn.columns |
|
time=dyn.shape[0] |
|
for t in range(time): |
|
cols_t = [str(x) + "_"+str(t) for x in cols] |
|
concat_cols.extend(cols_t) |
|
|
|
X= getXY(dyn_df,cond_df,demo,concat_cols,concat) |
|
if X_df.empty: |
|
X_df=pd.concat([X_df,X],axis=1) |
|
else: |
|
X_df = pd.concat([X_df, X], axis=0) |
|
X_df=X_df.fillna(0) |
|
X_df = encoding(X_df) |
|
|
|
return X_df |
|
|
|
|
|
class Mimic4DatasetConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for Mimic4Dataset.""" |
|
|
|
def __init__( |
|
self, |
|
**kwargs, |
|
): |
|
super().__init__(**kwargs) |
|
|
|
class Mimic4Dataset(datasets.GeneratorBasedBuilder): |
|
VERSION = datasets.Version("1.0.0") |
|
|
|
def __init__(self, **kwargs): |
|
self.mimic_path = kwargs.pop("mimic_path", None) |
|
self.encoding = kwargs.pop("encoding",True) |
|
self.config_path = kwargs.pop("config_path",None) |
|
self.test_size = kwargs.pop("test_size",0.2) |
|
self.val_size = kwargs.pop("val_size",0.1) |
|
|
|
super().__init__(**kwargs) |
|
|
|
|
|
BUILDER_CONFIGS = [ |
|
Mimic4DatasetConfig( |
|
name="Phenotype", |
|
version=VERSION, |
|
description="Dataset for mimic4 Phenotype task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Readmission", |
|
version=VERSION, |
|
description="Dataset for mimic4 Readmission task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Length of Stay", |
|
version=VERSION, |
|
description="Dataset for mimic4 Length of Stay task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Mortality", |
|
version=VERSION, |
|
description="Dataset for mimic4 Mortality task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Phenotype Custom", |
|
version=VERSION, |
|
description="Dataset for mimic4 Custom Phenotype task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Readmission Custom", |
|
version=VERSION, |
|
description="Dataset for mimic4 Custom Readmission task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Length of Stay Custom", |
|
version=VERSION, |
|
description="Dataset for mimic4 Custom Length of Stay task" |
|
), |
|
Mimic4DatasetConfig( |
|
name="Mortality Custom", |
|
version=VERSION, |
|
description="Dataset for mimic4 Custom Mortality task" |
|
), |
|
] |
|
|
|
DEFAULT_CONFIG_NAME = "Mortality" |
|
|
|
def map_dtype(self,dtype): |
|
if pd.api.types.is_integer_dtype(dtype): |
|
return datasets.Value('int64') |
|
elif pd.api.types.is_float_dtype(dtype): |
|
return datasets.Value('float64') |
|
elif pd.api.types.is_string_dtype(dtype): |
|
return datasets.Value('string') |
|
else: |
|
raise ValueError(f"Unsupported dtype: {dtype}") |
|
|
|
def create_cohort(self): |
|
if self.config.name == 'Phenotype' : self.config_path = _CONFIG_URLS['phenotype'] |
|
if self.config.name == 'Readmission' : self.config_path = _CONFIG_URLS['readmission'] |
|
if self.config.name == 'Length of Stay' : self.config_path = _CONFIG_URLS['los'] |
|
if self.config.name == 'Mortality' : self.config_path = _CONFIG_URLS['mortality'] |
|
if self.config.name in ['Phenotype Custom','Readmission Custom','Length of Stay Custom','Mortality Custom'] and self.config.name==None: |
|
raise ValueError('Please provide a config file') |
|
|
|
version = self.mimic_path.split('/')[-1] |
|
m = self.mimic_path.split('/')[-2] |
|
s='/'+m+'/'+version |
|
|
|
current_directory = os.getcwd() |
|
if os.path.exists(os.path.dirname(current_directory)+'/MIMIC-IV-Data-Pipeline-main'): |
|
dir =os.path.dirname(current_directory) |
|
os.chdir(dir) |
|
else: |
|
|
|
dir = self.mimic_path.replace(s,'') |
|
if dir[-1]!='/': |
|
dir=dir+'/' |
|
elif dir=='': |
|
dir="./" |
|
parent_dir = os.path.dirname(self.mimic_path) |
|
os.chdir(parent_dir) |
|
|
|
|
|
repo_url='https://github.com/healthylaife/MIMIC-IV-Data-Pipeline' |
|
if os.path.exists('MIMIC-IV-Data-Pipeline-main'): |
|
path_bench = './MIMIC-IV-Data-Pipeline-main' |
|
else: |
|
path_bench ='./MIMIC-IV-Data-Pipeline-main' |
|
subprocess.run(["git", "clone", repo_url, path_bench]) |
|
os.makedirs(path_bench+'/mimic-iv') |
|
shutil.move(version,path_bench+'/mimic-iv') |
|
|
|
os.chdir(path_bench) |
|
self.mimic_path = './mimic-iv/'+version |
|
|
|
|
|
if self.config_path[0:4] == 'http': |
|
c = self.config_path.split('/')[-1] |
|
file_path, head = urlretrieve(self.config_path,c) |
|
else : |
|
file_path = self.config_path |
|
|
|
|
|
if not os.path.exists('./config'): |
|
os.makedirs('config') |
|
|
|
conf='./config/'+file_path.split('/')[-1] |
|
|
|
if not os.path.exists(conf): |
|
shutil.move(file_path,'./config') |
|
|
|
|
|
if not os.path.exists('./model/data_generation_icu_modify.py'): |
|
file_path, head = urlretrieve(_DATA_GEN, "data_generation_icu_modify.py") |
|
shutil.move(file_path, './model') |
|
|
|
if not os.path.exists('./preprocessing/day_intervals_preproc/day_intervals_cohort_v22.py'): |
|
file_path, head = urlretrieve(_DAY_INT, "day_intervals_cohort_v22.py") |
|
shutil.move(file_path, './preprocessing/day_intervals_preproc') |
|
|
|
file_path, head = urlretrieve(_COHORT, "cohort.py") |
|
if not os.path.exists('cohort.py'): |
|
shutil.move(file_path, './') |
|
|
|
data_dir = "./data/dict/"+self.config.name.replace(" ","_")+"/dataDic" |
|
sys.path.append(path_bench) |
|
config = self.config_path.split('/')[-1] |
|
|
|
script = 'python cohort.py '+ self.config.name.replace(" ","_") +" "+ self.mimic_path+ " "+path_bench+ " "+config |
|
|
|
|
|
if not os.path.exists(data_dir) : |
|
os.system(script) |
|
|
|
config_path='./config/'+config |
|
with open(config_path) as f: |
|
config = yaml.safe_load(f) |
|
feat_cond, feat_chart, feat_proc, feat_meds, feat_out = config['diagnosis'], config['chart'], config['proc'], config['meds'], config['output'] |
|
|
|
with open(data_dir, 'rb') as fp: |
|
dataDic = pickle.load(fp) |
|
data = pd.DataFrame.from_dict(dataDic) |
|
|
|
data=data.T |
|
train_data, test_data = train_test_split(data, test_size=self.test_size, random_state=42) |
|
train_data, val_data = train_test_split(test_data, test_size=self.val_size, random_state=42) |
|
|
|
dict_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
train_dic = train_data.to_dict('index') |
|
test_dic = test_data.to_dict('index') |
|
val_dic = val_data.to_dict('index') |
|
|
|
train_path = dict_dir+'/train_data.pkl' |
|
test_path = dict_dir+'/test_data.pkl' |
|
val_path = dict_dir+'/val_data.pkl' |
|
|
|
with open(train_path, 'wb') as f: |
|
pickle.dump(train_dic, f) |
|
with open(val_path, 'wb') as f: |
|
pickle.dump(val_dic, f) |
|
with open(test_path, 'wb') as f: |
|
pickle.dump(test_dic, f) |
|
|
|
|
|
return feat_cond, feat_chart, feat_proc, feat_meds, feat_out, dict_dir |
|
|
|
|
|
|
|
def _info_raw(self): |
|
features = datasets.Features( |
|
{ |
|
"label": datasets.ClassLabel(num_classes=2,names=["0", "1"]), |
|
"gender": datasets.Value("string"), |
|
"ethnicity": datasets.Value("string"), |
|
"insurance": datasets.Value("string"), |
|
"age": datasets.Value("int32"), |
|
"COND": datasets.Sequence(datasets.Value("string")), |
|
"MEDS": { |
|
"signal": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
, |
|
"rate": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
, |
|
"amount": |
|
{ |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
} |
|
|
|
}, |
|
"PROC": { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
"CHART": |
|
{ |
|
"signal" : { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
"val" : { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
}, |
|
"OUT": { |
|
"id": datasets.Sequence(datasets.Value("int32")), |
|
"value": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) |
|
}, |
|
|
|
} |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
def __split_generators_raw(self): |
|
|
|
csv_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
|
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_dir+'/train_data.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": csv_dir+'/val_data.pkl'}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": csv_dir+'/test_data.pkl'}), |
|
] |
|
|
|
def _generate_examples_raw(self, filepath): |
|
with open(filepath, 'rb') as fp: |
|
dataDic = pickle.load(fp) |
|
for hid, data in dataDic.items(): |
|
proc_features = data['Proc'] |
|
chart_features = data['Chart'] |
|
meds_features = data['Med'] |
|
out_features = data['Out'] |
|
cond_features = data['Cond']['fids'] |
|
eth= data['ethnicity'] |
|
age = data['age'] |
|
gender = data['gender'] |
|
label = data['label'] |
|
insurance=data['insurance'] |
|
|
|
items = list(proc_features.keys()) |
|
values =[proc_features[i] for i in items ] |
|
procs = {"id" : items, |
|
"value": values} |
|
|
|
items_outs = list(out_features.keys()) |
|
values_outs =[out_features[i] for i in items_outs ] |
|
outs = {"id" : items_outs, |
|
"value": values_outs} |
|
|
|
|
|
if ('signal' in chart_features): |
|
items_chart_sig = list(chart_features['signal'].keys()) |
|
values_chart_sig =[chart_features['signal'][i] for i in items_chart_sig ] |
|
chart_sig = {"id" : items_chart_sig, |
|
"value": values_chart_sig} |
|
else: |
|
chart_sig = {"id" : [], |
|
"value": []} |
|
|
|
if ('val' in chart_features): |
|
items_chart_val = list(chart_features['val'].keys()) |
|
values_chart_val =[chart_features['val'][i] for i in items_chart_val ] |
|
chart_val = {"id" : items_chart_val, |
|
"value": values_chart_val} |
|
else: |
|
chart_val = {"id" : [], |
|
"value": []} |
|
|
|
charts = {"signal" : chart_sig, |
|
"val" : chart_val} |
|
|
|
|
|
if ('signal' in meds_features): |
|
items_meds_sig = list(meds_features['signal'].keys()) |
|
values_meds_sig =[meds_features['signal'][i] for i in items_meds_sig ] |
|
meds_sig = {"id" : items_meds_sig, |
|
"value": values_meds_sig} |
|
else: |
|
meds_sig = {"id" : [], |
|
"value": []} |
|
|
|
if ('rate' in meds_features): |
|
items_meds_rate = list(meds_features['rate'].keys()) |
|
values_meds_rate =[meds_features['rate'][i] for i in items_meds_rate ] |
|
meds_rate = {"id" : items_meds_rate, |
|
"value": values_meds_rate} |
|
else: |
|
meds_rate = {"id" : [], |
|
"value": []} |
|
|
|
if ('amount' in meds_features): |
|
items_meds_amount = list(meds_features['amount'].keys()) |
|
values_meds_amount =[meds_features['amount'][i] for i in items_meds_amount ] |
|
meds_amount = {"id" : items_meds_amount, |
|
"value": values_meds_amount} |
|
else: |
|
meds_amount = {"id" : [], |
|
"value": []} |
|
|
|
meds = {"signal" : meds_sig, |
|
"rate" : meds_rate, |
|
"amount" : meds_amount} |
|
|
|
yield int(hid), { |
|
"label" : label, |
|
"gender" : gender, |
|
"ethnicity" : eth, |
|
"insurance" : insurance, |
|
"age" : age, |
|
"COND" : cond_features, |
|
"PROC" : procs, |
|
"CHART" : charts, |
|
"OUT" : outs, |
|
"MEDS" : meds |
|
} |
|
|
|
|
|
|
|
|
|
def _info_encoded(self,X_encoded): |
|
columns = {col: self.map_dtype(X_encoded[col].dtype) for col in X_encoded.columns} |
|
features = datasets.Features(columns) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
citation=_CITATION, |
|
) |
|
|
|
def __split_generators_encoded(self): |
|
data_dir = "./data/dict/"+self.config.name.replace(" ","_") |
|
|
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir+'/X_train_encoded.csv'}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir+'/X_val_encoded.csv'}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir+'/X_test_encoded.csv'}), |
|
] |
|
|
|
def _generate_examples_encoded(self, filepath): |
|
df = pd.read_csv(filepath, header=0) |
|
for i, row in df.iterrows(): |
|
yield i, row.to_dict() |
|
|
|
|
|
|
|
def _info(self): |
|
self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out,self.path = self.create_cohort() |
|
|
|
if self.encoding : |
|
X_train_encoded=generate_split(self.path+'/train_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
X_test_encoded=generate_split(self.path+'/test_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
X_val_encoded=generate_split(self.path+'/val_data.pkl',self.config.name,True,self.feat_cond, self.feat_chart, self.feat_proc, self.feat_meds, self.feat_out) |
|
|
|
X_train_encoded.to_csv(self.path+"/X_train_encoded.csv", index=False) |
|
X_test_encoded.to_csv(self.path+"/X_test_encoded.csv", index=False) |
|
X_val_encoded.to_csv(self.path+"/X_val_encoded.csv", index=False) |
|
return self._info_encoded(X_train_encoded) |
|
else: |
|
return self._info_raw() |
|
|
|
def _split_generators(self, dl_manager): |
|
if self.encoding : |
|
return self.__split_generators_encoded() |
|
else: |
|
return self.__split_generators_raw() |
|
|
|
def _generate_examples(self, filepath): |
|
if not self.encoding : |
|
yield from self._generate_examples_raw(filepath) |
|
else: |
|
yield from self._generate_examples_encoded(filepath) |
|
|
|
|