import pickle

import numpy as np
import pandas as pd
import torch

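# Load a pickled vocabulary list for the given task and map each token to a
# 1-based index, reserving index 0 for padding.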
def create_vocab(file, task):
    with open('./data/dict/' + task + '/' + file, 'rb') as fp:
        condVocab = pickle.load(fp)
    condVocabDict = {}
    condVocabDict[0] = 0
    for val in range(len(condVocab)):
        condVocabDict[condVocab[val]] = val + 1

    return condVocabDict

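# Fixed gender-to-index mapping, with '<PAD>' reserved at index 0.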
def gender_vocab():
    genderVocabDict = {}
    genderVocabDict['<PAD>'] = 0
    genderVocabDict['M'] = 1
    genderVocabDict['F'] = 2

    return genderVocabDict

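# Build and pickle the demographic vocabularies (ethnicity, age, gender,
# insurance) and load the clinical vocabularies selected by the feature flags.
# Returns the sizes of the clinical vocabularies together with the demographic
# mappings themselves.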
def vocab(task, diag_flag, proc_flag, out_flag, chart_flag, med_flag, lab_flag):
    condVocabDict = {}
    procVocabDict = {}
    medVocabDict = {}
    outVocabDict = {}
    chartVocabDict = {}
    labVocabDict = {}
    ethVocabDict = {}
    ageVocabDict = {}
    genderVocabDict = {}
    insVocabDict = {}

    ethVocabDict = create_vocab('ethVocab', task)
    with open('./data/dict/' + task + '/ethVocabDict', 'wb') as fp:
        pickle.dump(ethVocabDict, fp)

    ageVocabDict = create_vocab('ageVocab', task)
    with open('./data/dict/' + task + '/ageVocabDict', 'wb') as fp:
        pickle.dump(ageVocabDict, fp)

    genderVocabDict = gender_vocab()
    with open('./data/dict/' + task + '/genderVocabDict', 'wb') as fp:
        pickle.dump(genderVocabDict, fp)

    insVocabDict = create_vocab('insVocab', task)
    with open('./data/dict/' + task + '/insVocabDict', 'wb') as fp:
        pickle.dump(insVocabDict, fp)

    if diag_flag:
        with open('./data/dict/' + task + '/condVocab', 'rb') as fp:
            condVocabDict = pickle.load(fp)
    if proc_flag:
        with open('./data/dict/' + task + '/procVocab', 'rb') as fp:
            procVocabDict = pickle.load(fp)
    if med_flag:
        with open('./data/dict/' + task + '/medVocab', 'rb') as fp:
            medVocabDict = pickle.load(fp)
    if out_flag:
        with open('./data/dict/' + task + '/outVocab', 'rb') as fp:
            outVocabDict = pickle.load(fp)
    if chart_flag:
        with open('./data/dict/' + task + '/chartVocab', 'rb') as fp:
            chartVocabDict = pickle.load(fp)
    if lab_flag:
        with open('./data/dict/' + task + '/labsVocab', 'rb') as fp:
            labVocabDict = pickle.load(fp)

    return (len(condVocabDict), len(procVocabDict), len(medVocabDict),
            len(outVocabDict), len(chartVocabDict), len(labVocabDict),
            ethVocabDict, genderVocabDict, ageVocabDict, insVocabDict)

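# Convert one patient's raw feature dictionaries into model-ready frames: a
# one-hot condition vector (cond_df), a time-indexed dynamic frame covering
# meds/procedures/outputs/charts or labs (dyn_df), and a single-row
# demographic frame (demo).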
def concat_data(data, task, feat_cond, feat_proc, feat_out, feat_chart, feat_meds, feat_lab):
    meds = data['Med']
    proc = data['Proc']
    out = data['Out']
    chart = data['Chart']
    cond = data['Cond']['fids']

    cond_df = pd.DataFrame()
    proc_df = pd.DataFrame()
    out_df = pd.DataFrame()
    chart_df = pd.DataFrame()
    meds_df = pd.DataFrame()

    new_row = {'Age': data['age'], 'gender': data['gender'], 'ethnicity': data['ethnicity'],
               'label': data['label'], 'insurance': data['insurance']}
    # DataFrame.append was removed in pandas 2.0, so build the single-row frame directly.
    demo = pd.DataFrame([new_row], columns=['Age', 'gender', 'ethnicity', 'label', 'insurance'])

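    # One-hot encode the recorded diagnosis codes against the full condition
    # vocabulary, dropping any code that is not part of the saved vocabulary.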
    if feat_cond:
        with open("./data/dict/" + task + "/condVocab", 'rb') as fp:
            conDict = pickle.load(fp)
        conds = pd.DataFrame(conDict, columns=['COND'])
        features = pd.DataFrame(np.zeros([1, len(conds)]), columns=conds['COND'])

        if not cond:
            # No recorded diagnoses: keep an all-zero row over the full vocabulary
            # (mirrors the empty-input handling of the other feature blocks).
            cond_df = features.fillna(0)
        else:
            cond_df = pd.DataFrame(cond, columns=['COND'])
            cond_df['val'] = 1
            cond_df = cond_df.drop_duplicates().pivot(columns='COND', values='val').reset_index(drop=True)
            cond_df = cond_df.fillna(0)
            oneh = cond_df.sum().to_frame().T
            combined_df = pd.concat([features, oneh], ignore_index=True).fillna(0)
            combined_oneh = combined_df.sum().to_frame().T
            cond_df = combined_oneh
            for c in cond_df.columns:
                if c not in features:
                    cond_df = cond_df.drop(columns=[c])

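    # For each enabled time-series modality, align the recorded values with the
    # saved vocabulary so every admission yields the same column set; an
    # all-zero frame is produced when no values were recorded.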
    if feat_proc:
        with open("./data/dict/" + task + "/procVocab", 'rb') as fp:
            procDic = pickle.load(fp)

        procedures = pd.DataFrame(procDic, columns=['PROC'])
        features = pd.DataFrame(np.zeros([1, len(procedures)]), columns=procedures['PROC'])
        features.columns = pd.MultiIndex.from_product([["PROC"], features.columns])
        if proc:
            feat = proc.keys()
            proc_val = [proc[key] for key in feat]
            procs = pd.DataFrame(columns=feat)
            for p, v in zip(feat, proc_val):
                procs[p] = v
            procs.columns = pd.MultiIndex.from_product([["PROC"], procs.columns])
            proc_df = pd.concat([features, procs], ignore_index=True).fillna(0)
        else:
            proc_df = features.fillna(0)

    if feat_out:
        with open("./data/dict/" + task + "/outVocab", 'rb') as fp:
            outDic = pickle.load(fp)

        out_vocab = pd.DataFrame(outDic, columns=['OUT'])
        features = pd.DataFrame(np.zeros([1, len(out_vocab)]), columns=out_vocab['OUT'])
        features.columns = pd.MultiIndex.from_product([["OUT"], features.columns])
        if out:
            feat = out.keys()
            out_val = [out[key] for key in feat]
            outs = pd.DataFrame(columns=feat)
            for o, v in zip(feat, out_val):
                outs[o] = v
            outs.columns = pd.MultiIndex.from_product([["OUT"], outs.columns])
            out_df = pd.concat([features, outs], ignore_index=True).fillna(0)
        else:
            out_df = features.fillna(0)

    if feat_chart:
        with open("./data/dict/" + task + "/chartVocab", 'rb') as fp:
            chartDic = pickle.load(fp)

        chart_vocab = pd.DataFrame(chartDic, columns=['CHART'])
        features = pd.DataFrame(np.zeros([1, len(chart_vocab)]), columns=chart_vocab['CHART'])
        features.columns = pd.MultiIndex.from_product([["CHART"], features.columns])
        if chart:
            charts = chart['val']
            feat = charts.keys()
            chart_val = [charts[key] for key in feat]
            chart = pd.DataFrame(columns=feat)
            for c, v in zip(feat, chart_val):
                chart[c] = v
            chart.columns = pd.MultiIndex.from_product([["CHART"], chart.columns])
            chart_df = pd.concat([features, chart], ignore_index=True).fillna(0)
        else:
            chart_df = features.fillna(0)

    if feat_lab:
        with open("./data/dict/" + task + "/labsVocab", 'rb') as fp:
            labDic = pickle.load(fp)

        # The lab branch reads the same data['Chart'] structure and fills chart_df
        # with LAB columns, so feat_chart and feat_lab are presumably not enabled
        # together.
        lab_vocab = pd.DataFrame(labDic, columns=['LAB'])
        features = pd.DataFrame(np.zeros([1, len(lab_vocab)]), columns=lab_vocab['LAB'])
        features.columns = pd.MultiIndex.from_product([["LAB"], features.columns])
        if chart:
            charts = chart['val']
            feat = charts.keys()
            chart_val = [charts[key] for key in feat]
            chart = pd.DataFrame(columns=feat)
            for c, v in zip(feat, chart_val):
                chart[c] = v
            chart.columns = pd.MultiIndex.from_product([["LAB"], chart.columns])
            chart_df = pd.concat([features, chart], ignore_index=True).fillna(0)
        else:
            chart_df = features.fillna(0)

    if feat_meds:
        with open("./data/dict/" + task + "/medVocab", 'rb') as fp:
            medDic = pickle.load(fp)

        med_vocab = pd.DataFrame(medDic, columns=['MEDS'])
        features = pd.DataFrame(np.zeros([1, len(med_vocab)]), columns=med_vocab['MEDS'])
        features.columns = pd.MultiIndex.from_product([["MEDS"], features.columns])
        if meds:
            feat = meds['signal'].keys()
            med_val = [meds['amount'][key] for key in feat]
            med = pd.DataFrame(columns=feat)
            for m, v in zip(feat, med_val):
                med[m] = v
            med.columns = pd.MultiIndex.from_product([["MEDS"], med.columns])
            meds_df = pd.concat([features, med], ignore_index=True).fillna(0)
        else:
            meds_df = features.fillna(0)

    dyn_df = pd.concat([meds_df, proc_df, out_df, chart_df], axis=1)
    return dyn_df, cond_df, demo

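# Turn one patient's data into the inputs consumed by the deep-learning models:
# a static condition tensor, index-encoded demographics, per-modality lists of
# integer time steps, and the label.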
def generate_deep(data, task, feat_cond, feat_proc, feat_out, feat_chart, feat_meds, feat_lab):
    stat_df = torch.zeros(size=(1, 0))
    demo_df = torch.zeros(size=(1, 0))
    meds = torch.zeros(size=(0, 0))
    charts = torch.zeros(size=(0, 0))
    proc = torch.zeros(size=(0, 0))
    out = torch.zeros(size=(0, 0))
    lab = torch.zeros(size=(0, 0))

    size_cond, size_proc, size_meds, size_out, size_chart, size_lab, eth_vocab, gender_vocab, age_vocab, ins_vocab = vocab(
        task.replace(" ", "_"), feat_cond, feat_proc, feat_out, feat_chart, feat_meds, False)
    dyn, cond_df, demo = concat_data(
        data, task.replace(" ", "_"), feat_cond, feat_proc, feat_out, feat_chart, feat_meds, feat_lab)

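    # Convert each selected dynamic modality from its DataFrame into a nested
    # Python list of integer time steps (via an intermediate LongTensor).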
    if feat_chart:
        charts = dyn['CHART']
        charts = torch.tensor(charts.to_numpy(), dtype=torch.long)
        charts = charts.tolist()

    if feat_meds:
        meds = dyn['MEDS']
        meds = torch.tensor(meds.to_numpy(), dtype=torch.long)
        meds = meds.tolist()

    if feat_proc:
        proc = dyn['PROC']
        proc = torch.tensor(proc.to_numpy(), dtype=torch.long)
        proc = proc.tolist()

    if feat_out:
        out = dyn['OUT']
        out = torch.tensor(out.to_numpy(), dtype=torch.long)
        out = out.tolist()

    if feat_lab:
        lab = dyn['LAB']
        lab = torch.tensor(lab.to_numpy(), dtype=torch.long)
        lab = lab.tolist()

    if feat_cond:
        stat = torch.tensor(cond_df.to_numpy())
        if stat_df[0].nelement():
            stat_df = torch.cat((stat_df, stat), 0)
        else:
            stat_df = stat

    stat_df = stat_df.type(torch.LongTensor)
    stat_df = stat_df.squeeze()

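    # Extract the label and map the demographic strings to their vocabulary
    # indices before packing them into a LongTensor.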
    y = int(demo['label'].iloc[0])
    y_df = torch.tensor(y)
    y_df = y_df.type(torch.LongTensor)

    # Column-level replace(..., inplace=True) is deprecated in pandas; assign the
    # result back instead.
    demo["gender"] = demo["gender"].replace(gender_vocab)
    demo["ethnicity"] = demo["ethnicity"].replace(eth_vocab)
    demo["insurance"] = demo["insurance"].replace(ins_vocab)
    demo["Age"] = demo["Age"].replace(age_vocab)
    demo = demo[["gender", "ethnicity", "insurance", "Age"]]
    demo = torch.tensor(demo.values)
    if demo_df[0].nelement():
        demo_df = torch.cat((demo_df, demo), 0)
    else:
        demo_df = demo
    demo_df = demo_df.type(torch.LongTensor)
    demo_df = demo_df.squeeze()

    return stat_df, demo_df, meds, charts, out, proc, lab, y

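# Assemble a single feature row for the classical ML models: either the
# flattened time series (concat=True) or per-feature aggregates (mean for
# CHART/MEDS, max otherwise), concatenated with the static and demographic
# frames.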
def generate_ml(dyn, stat, demo, concat_cols, concat):
    X_df = pd.DataFrame()
    if concat:
        dyna = dyn.copy()
        dyna.columns = dyna.columns.droplevel(0)
        dyna = dyna.to_numpy()
        dyna = np.nan_to_num(dyna, copy=False)
        dyna = dyna.reshape(1, -1)
        dyn_df = pd.DataFrame(data=dyna, columns=concat_cols)
    else:
        dyn_df = pd.DataFrame()
        for key in dyn.columns.levels[0]:
            dyn_temp = dyn[key]
            if (key == "CHART") or (key == "MEDS"):
                agg = dyn_temp.aggregate("mean")
            else:
                agg = dyn_temp.aggregate("max")
            agg = agg.reset_index()

            if dyn_df.empty:
                dyn_df = agg
            else:
                dyn_df = pd.concat([dyn_df, agg], axis=0)
        dyn_df = dyn_df.T
        dyn_df.columns = dyn_df.iloc[0]
        dyn_df = dyn_df.iloc[1:, :]

    X_df = pd.concat([dyn_df, stat], axis=1)
    X_df = pd.concat([X_df, demo], axis=1)
    return X_df